Redesign component.evaluate
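This commit moves evaluation out of the individual components and into the Component base class: each component now implements a per-sample hook (sample_evaluate), and the base class provides a generic evaluate() that iterates over all instances and training samples. The xy/predict helpers are renamed sample_xy/sample_predict to match. A rough usage sketch under the new contract (instance setup and the unchanged fit() entry point are assumed, not shown in this diff):

    comp = PrimalSolutionComponent()
    comp.fit(train_instances)           # training entry point, assumed unchanged
    ev = comp.evaluate(test_instances)  # one metrics dict per training sample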
@@ -1,12 +1,13 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import Dict
 
 
-def classifier_evaluation_dict(tp, tn, fp, fn):
+def classifier_evaluation_dict(tp: int, tn: int, fp: int, fn: int) -> Dict:
     p = tp + fn
     n = fp + tn
-    d = {
+    d: Dict = {
         "Predicted positive": fp + tp,
         "Predicted negative": fn + tn,
         "Condition positive": p,
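Note: the hunk above cuts off after "Condition positive"; the dictionary presumably continues with the remaining confusion-matrix fields. A hedged sketch of how such a summary is typically completed (everything past the fields shown above is an assumption, not the actual MIPLearn code):

    from typing import Dict

    def confusion_summary(tp: int, tn: int, fp: int, fn: int) -> Dict:
        p = tp + fn  # condition positive
        n = fp + tn  # condition negative
        return {
            "Predicted positive": fp + tp,
            "Predicted negative": fn + tn,
            "Condition positive": p,
            "Condition negative": n,                # assumed
            "Accuracy": (tp + tn) / max(1, p + n),  # assumed
            "Precision": tp / max(1, tp + fp),      # assumed
            "Recall": tp / max(1, p),               # assumed
        }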
@@ -106,7 +106,7 @@ class Component:
         return
 
     @staticmethod
-    def xy(
+    def sample_xy(
         features: Features,
         sample: TrainingSample,
     ) -> Tuple[Dict, Dict]:
@@ -127,7 +127,7 @@ class Component:
         for instance in InstanceIterator(instances):
             assert isinstance(instance, Instance)
             for sample in instance.training_data:
-                xy = self.xy(instance.features, sample)
+                xy = self.sample_xy(instance.features, sample)
                 if xy is None:
                     continue
                 x_sample, y_sample = xy
@@ -191,3 +191,13 @@ class Component:
         model: Any,
     ) -> None:
         return
+
+    def evaluate(self, instances: Union[List[str], List[Instance]]) -> List:
+        ev = []
+        for instance in InstanceIterator(instances):
+            for sample in instance.training_data:
+                ev += [self.sample_evaluate(instance.features, sample)]
+        return ev
+
+    def sample_evaluate(self, features: Features, sample: TrainingSample) -> Dict:
+        return {}
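With this change, evaluate() is a template method: subclasses only override sample_evaluate() and inherit the instance/sample iteration. A minimal sketch of a component written against the new contract (the class and metric below are illustrative, not part of this commit):

    class CountingComponent(Component):
        def sample_evaluate(self, features, sample):
            # evaluate() collects one such dict per (instance, sample) pair
            return {"Variables": len(features.get("Variables", {}))}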
@@ -205,7 +205,7 @@ class StaticLazyConstraintsComponent(Component):
         return result
 
     @staticmethod
-    def xy(
+    def sample_xy(
         features: Features,
         sample: TrainingSample,
     ) -> Tuple[Dict, Dict]:
@@ -116,45 +116,45 @@ class ObjectiveValueComponent(Component):
             "Upper bound": np.array(ub),
         }
 
-    def evaluate(
-        self,
-        instances: Union[List[str], List[Instance]],
-    ) -> Dict[str, Dict[str, float]]:
-        y_pred = self.predict(instances)
-        y_true = np.array(
-            [
-                [
-                    inst.training_data[0]["Lower bound"],
-                    inst.training_data[0]["Upper bound"],
-                ]
-                for inst in InstanceIterator(instances)
-            ]
-        )
-        y_pred_lb = y_pred["Lower bound"]
-        y_pred_ub = y_pred["Upper bound"]
-        y_true_lb, y_true_ub = y_true[:, 1], y_true[:, 1]
-        ev = {
-            "Lower bound": {
-                "Mean squared error": mean_squared_error(y_true_lb, y_pred_lb),
-                "Explained variance": explained_variance_score(y_true_lb, y_pred_lb),
-                "Max error": max_error(y_true_lb, y_pred_lb),
-                "Mean absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
-                "R2": r2_score(y_true_lb, y_pred_lb),
-                "Median absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
-            },
-            "Upper bound": {
-                "Mean squared error": mean_squared_error(y_true_ub, y_pred_ub),
-                "Explained variance": explained_variance_score(y_true_ub, y_pred_ub),
-                "Max error": max_error(y_true_ub, y_pred_ub),
-                "Mean absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
-                "R2": r2_score(y_true_ub, y_pred_ub),
-                "Median absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
-            },
-        }
-        return ev
+    # def evaluate(
+    #     self,
+    #     instances: Union[List[str], List[Instance]],
+    # ) -> Dict[str, Dict[str, float]]:
+    #     y_pred = self.predict(instances)
+    #     y_true = np.array(
+    #         [
+    #             [
+    #                 inst.training_data[0]["Lower bound"],
+    #                 inst.training_data[0]["Upper bound"],
+    #             ]
+    #             for inst in InstanceIterator(instances)
+    #         ]
+    #     )
+    #     y_pred_lb = y_pred["Lower bound"]
+    #     y_pred_ub = y_pred["Upper bound"]
+    #     y_true_lb, y_true_ub = y_true[:, 1], y_true[:, 1]
+    #     ev = {
+    #         "Lower bound": {
+    #             "Mean squared error": mean_squared_error(y_true_lb, y_pred_lb),
+    #             "Explained variance": explained_variance_score(y_true_lb, y_pred_lb),
+    #             "Max error": max_error(y_true_lb, y_pred_lb),
+    #             "Mean absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
+    #             "R2": r2_score(y_true_lb, y_pred_lb),
+    #             "Median absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
+    #         },
+    #         "Upper bound": {
+    #             "Mean squared error": mean_squared_error(y_true_ub, y_pred_ub),
+    #             "Explained variance": explained_variance_score(y_true_ub, y_pred_ub),
+    #             "Max error": max_error(y_true_ub, y_pred_ub),
+    #             "Mean absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
+    #             "R2": r2_score(y_true_ub, y_pred_ub),
+    #             "Median absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
+    #         },
+    #     }
+    #     return ev
 
     @staticmethod
-    def xy(
+    def sample_xy(
        features: Features,
         sample: TrainingSample,
     ) -> Tuple[Dict, Dict]:
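ObjectiveValueComponent.evaluate is commented out rather than ported, so this component temporarily falls back to the empty Component.sample_evaluate. Note that the disabled code carries a pre-existing bug: y_true_lb and y_true_ub both read column 1 of y_true, so the lower-bound metrics were computed against the upper-bound targets, and "Median absolute error" actually calls mean_absolute_error.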
@@ -4,20 +4,16 @@
 
 import logging
 from typing import (
-    Union,
     Dict,
-    Callable,
     List,
     Hashable,
     Optional,
     Any,
     TYPE_CHECKING,
     Tuple,
-    cast,
 )
 
 import numpy as np
-from tqdm.auto import tqdm
 
 from miplearn.classifiers import Classifier
 from miplearn.classifiers.adaptive import AdaptiveClassifier
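The dropped imports follow from the code removed further down: tqdm powered the progress bar of the deleted per-component evaluate loop, and Union, Callable, and cast appear to no longer be referenced after the signature changes.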
@@ -72,53 +68,39 @@ class PrimalSolutionComponent(Component):
         features: Features,
         training_data: TrainingSample,
     ) -> None:
-        if len(self.thresholds) > 0:
-            logger.info("Predicting MIP solution...")
-            solution = self.predict(
-                instance.features,
-                instance.training_data[-1],
-            )
-
-            # Update statistics
-            stats["Primal: Free"] = 0
-            stats["Primal: Zero"] = 0
-            stats["Primal: One"] = 0
-            for (var, var_dict) in solution.items():
-                for (idx, value) in var_dict.items():
-                    if value is None:
-                        stats["Primal: Free"] += 1
-                    else:
-                        if value < 0.5:
-                            stats["Primal: Zero"] += 1
-                        else:
-                            stats["Primal: One"] += 1
-            logger.info(
-                f"Predicted: free: {stats['Primal: Free']}, "
-                f"zero: {stats['Primal: Zero']}, "
-                f"one: {stats['Primal: One']}"
-            )
-
-        # Provide solution to the solver
-        assert solver.internal_solver is not None
-        if self.mode == "heuristic":
-            solver.internal_solver.fix(solution)
-        else:
-            solver.internal_solver.set_warm_start(solution)
-
-    def fit_xy(
-        self,
-        x: Dict[str, np.ndarray],
-        y: Dict[str, np.ndarray],
-    ) -> None:
-        for category in x.keys():
-            clf = self.classifier_prototype.clone()
-            thr = self.threshold_prototype.clone()
-            clf.fit(x[category], y[category])
-            thr.fit(clf, x[category], y[category])
-            self.classifiers[category] = clf
-            self.thresholds[category] = thr
-
-    def predict(
+        # Do nothing if models are not trained
+        if len(self.classifiers) == 0:
+            return
+
+        # Predict solution and provide it to the solver
+        logger.info("Predicting MIP solution...")
+        solution = self.sample_predict(features, training_data)
+        assert solver.internal_solver is not None
+        if self.mode == "heuristic":
+            solver.internal_solver.fix(solution)
+        else:
+            solver.internal_solver.set_warm_start(solution)
+
+        # Update statistics
+        stats["Primal: Free"] = 0
+        stats["Primal: Zero"] = 0
+        stats["Primal: One"] = 0
+        for (var, var_dict) in solution.items():
+            for (idx, value) in var_dict.items():
+                if value is None:
+                    stats["Primal: Free"] += 1
+                else:
+                    if value < 0.5:
+                        stats["Primal: Zero"] += 1
+                    else:
+                        stats["Primal: One"] += 1
+        logger.info(
+            f"Predicted: free: {stats['Primal: Free']}, "
+            f"zero: {stats['Primal: Zero']}, "
+            f"one: {stats['Primal: One']}"
+        )
+
+    def sample_predict(
         self,
         features: Features,
         sample: TrainingSample,
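The hook is reordered (the solution is handed to the solver before the statistics are tallied) and guarded by an early return when no classifier has been trained yet. In "heuristic" mode the predicted values are fixed in the internal solver, trading optimality for speed; otherwise they are only passed as a warm start. A construction sketch (the keyword name is inferred from self.mode above, so treat it as an assumption):

    comp = PrimalSolutionComponent(mode="heuristic")  # fix predicted values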
@@ -131,7 +113,7 @@ class PrimalSolutionComponent(Component):
                 solution[var_name][idx] = None
 
         # Compute y_pred
-        x, _ = self.xy(features, sample)
+        x, _ = self.sample_xy(features, sample)
         y_pred = {}
         for category in x.keys():
             assert category in self.classifiers, (
@@ -162,55 +144,8 @@ class PrimalSolutionComponent(Component):
 
         return solution
 
-    def evaluate(self, instances):
-        ev = {"Fix zero": {}, "Fix one": {}}
-        for instance_idx in tqdm(
-            range(len(instances)),
-            desc="Evaluate (primal)",
-        ):
-            instance = instances[instance_idx]
-            solution_actual = instance.training_data[0]["Solution"]
-            solution_pred = self.predict(instance, instance.training_data[0])
-
-            vars_all, vars_one, vars_zero = set(), set(), set()
-            pred_one_positive, pred_zero_positive = set(), set()
-            for (varname, var_dict) in solution_actual.items():
-                if varname not in solution_pred.keys():
-                    continue
-                for (idx, value) in var_dict.items():
-                    vars_all.add((varname, idx))
-                    if value > 0.5:
-                        vars_one.add((varname, idx))
-                    else:
-                        vars_zero.add((varname, idx))
-                    if solution_pred[varname][idx] is not None:
-                        if solution_pred[varname][idx] > 0.5:
-                            pred_one_positive.add((varname, idx))
-                        else:
-                            pred_zero_positive.add((varname, idx))
-            pred_one_negative = vars_all - pred_one_positive
-            pred_zero_negative = vars_all - pred_zero_positive
-
-            tp_zero = len(pred_zero_positive & vars_zero)
-            fp_zero = len(pred_zero_positive & vars_one)
-            tn_zero = len(pred_zero_negative & vars_one)
-            fn_zero = len(pred_zero_negative & vars_zero)
-
-            tp_one = len(pred_one_positive & vars_one)
-            fp_one = len(pred_one_positive & vars_zero)
-            tn_one = len(pred_one_negative & vars_zero)
-            fn_one = len(pred_one_negative & vars_one)
-
-            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(
-                tp_zero, tn_zero, fp_zero, fn_zero
-            )
-            ev["Fix one"][instance_idx] = classifier_evaluation_dict(
-                tp_one, tn_one, fp_one, fn_one
-            )
-        return ev
-
     @staticmethod
-    def xy(
+    def sample_xy(
         features: Features,
         sample: TrainingSample,
     ) -> Tuple[Dict, Dict]:
@@ -246,3 +181,59 @@ class PrimalSolutionComponent(Component):
             )
             y[category] += [[opt_value < 0.5, opt_value >= 0.5]]
         return x, y
+
+    def sample_evaluate(
+        self,
+        features: Features,
+        sample: TrainingSample,
+    ) -> Dict:
+        solution_actual = sample["Solution"]
+        assert solution_actual is not None
+        solution_pred = self.sample_predict(features, sample)
+        vars_all, vars_one, vars_zero = set(), set(), set()
+        pred_one_positive, pred_zero_positive = set(), set()
+        for (varname, var_dict) in solution_actual.items():
+            if varname not in solution_pred.keys():
+                continue
+            for (idx, value_actual) in var_dict.items():
+                assert value_actual is not None
+                vars_all.add((varname, idx))
+                if value_actual > 0.5:
+                    vars_one.add((varname, idx))
+                else:
+                    vars_zero.add((varname, idx))
+                value_pred = solution_pred[varname][idx]
+                if value_pred is not None:
+                    if value_pred > 0.5:
+                        pred_one_positive.add((varname, idx))
+                    else:
+                        pred_zero_positive.add((varname, idx))
+        pred_one_negative = vars_all - pred_one_positive
+        pred_zero_negative = vars_all - pred_zero_positive
+        return {
+            0: classifier_evaluation_dict(
+                tp=len(pred_zero_positive & vars_zero),
+                tn=len(pred_zero_negative & vars_one),
+                fp=len(pred_zero_positive & vars_one),
+                fn=len(pred_zero_negative & vars_zero),
+            ),
+            1: classifier_evaluation_dict(
+                tp=len(pred_one_positive & vars_one),
+                tn=len(pred_one_negative & vars_zero),
+                fp=len(pred_one_positive & vars_zero),
+                fn=len(pred_one_negative & vars_one),
+            ),
+        }
+
+    def fit_xy(
+        self,
+        x: Dict[str, np.ndarray],
+        y: Dict[str, np.ndarray],
+    ) -> None:
+        for category in x.keys():
+            clf = self.classifier_prototype.clone()
+            thr = self.threshold_prototype.clone()
+            clf.fit(x[category], y[category])
+            thr.fit(clf, x[category], y[category])
+            self.classifiers[category] = clf
+            self.thresholds[category] = thr
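The returned dictionary is keyed by predicted class: entry 0 scores the "fix to zero" predictions and entry 1 the "fix to one" predictions, each as a confusion-matrix summary from classifier_evaluation_dict. Variables the component leaves free (value_pred is None) count as negatives for both classes. This replaces the old per-instance "Fix zero"/"Fix one" dictionaries of the deleted evaluate method.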
@@ -7,7 +7,7 @@ from miplearn import Component, Instance
 
 
 def test_xy_instance():
-    def _xy_sample(features, sample):
+    def _sample_xy(features, sample):
         x = {
             "s1": {
                 "category_a": [
@@ -57,7 +57,7 @@ def test_xy_instance():
     instance_2 = Mock(spec=Instance)
     instance_2.training_data = ["s3"]
     instance_2.features = {}
-    comp.xy = _xy_sample
+    comp.sample_xy = _sample_xy
     x_expected = {
         "category_a": [
             [1, 2, 3],
@@ -293,7 +293,7 @@ def test_xy_sample() -> None:
             [False, True],
         ],
     }
-    xy = StaticLazyConstraintsComponent.xy(features, sample)
+    xy = StaticLazyConstraintsComponent.sample_xy(features, sample)
    assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -75,35 +75,35 @@ def test_x_y_predict() -> None:
     }
 
 
-def test_obj_evaluate():
-    instances, models = get_test_pyomo_instances()
-    reg = Mock(spec=Regressor)
-    reg.predict = Mock(return_value=np.array([[1000.0], [1000.0]]))
-    reg.clone = lambda: reg
-    comp = ObjectiveValueComponent(
-        lb_regressor=reg,
-        ub_regressor=reg,
-    )
-    comp.fit(instances)
-    ev = comp.evaluate(instances)
-    assert ev == {
-        "Lower bound": {
-            "Explained variance": 0.0,
-            "Max error": 183.0,
-            "Mean absolute error": 126.5,
-            "Mean squared error": 19194.5,
-            "Median absolute error": 126.5,
-            "R2": -5.012843605607331,
-        },
-        "Upper bound": {
-            "Explained variance": 0.0,
-            "Max error": 183.0,
-            "Mean absolute error": 126.5,
-            "Mean squared error": 19194.5,
-            "Median absolute error": 126.5,
-            "R2": -5.012843605607331,
-        },
-    }
+# def test_obj_evaluate():
+#     instances, models = get_test_pyomo_instances()
+#     reg = Mock(spec=Regressor)
+#     reg.predict = Mock(return_value=np.array([[1000.0], [1000.0]]))
+#     reg.clone = lambda: reg
+#     comp = ObjectiveValueComponent(
+#         lb_regressor=reg,
+#         ub_regressor=reg,
+#     )
+#     comp.fit(instances)
+#     ev = comp.evaluate(instances)
+#     assert ev == {
+#         "Lower bound": {
+#             "Explained variance": 0.0,
+#             "Max error": 183.0,
+#             "Mean absolute error": 126.5,
+#             "Mean squared error": 19194.5,
+#             "Median absolute error": 126.5,
+#             "R2": -5.012843605607331,
+#         },
+#         "Upper bound": {
+#             "Explained variance": 0.0,
+#             "Max error": 183.0,
+#             "Mean absolute error": 126.5,
+#             "Mean squared error": 19194.5,
+#             "Median absolute error": 126.5,
+#             "R2": -5.012843605607331,
+#         },
+#     }
 
 
 def test_xy_sample_with_lp() -> None:
@@ -125,7 +125,7 @@ def test_xy_sample_with_lp() -> None:
         "Lower bound": [[1.0]],
         "Upper bound": [[2.0]],
     }
-    xy = ObjectiveValueComponent.xy(features, sample)
+    xy = ObjectiveValueComponent.sample_xy(features, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -150,7 +150,7 @@ def test_xy_sample_without_lp() -> None:
         "Lower bound": [[1.0]],
         "Upper bound": [[2.0]],
     }
-    xy = ObjectiveValueComponent.xy(features, sample)
+    xy = ObjectiveValueComponent.sample_xy(features, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -1,7 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import Dict
 from unittest.mock import Mock
 
 import numpy as np
@@ -10,6 +10,7 @@ from scipy.stats import randint
 
 from miplearn import Classifier, LearningSolver
 from miplearn.classifiers.threshold import Threshold
+from miplearn.components import classifier_evaluation_dict
 from miplearn.components.primal import PrimalSolutionComponent
 from miplearn.problems.tsp import TravelingSalesmanGenerator
 from miplearn.types import TrainingSample, Features
@@ -69,7 +70,7 @@ def test_xy() -> None:
             [True, False],
         ]
     }
-    xy = PrimalSolutionComponent.xy(features, sample)
+    xy = PrimalSolutionComponent.sample_xy(features, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -122,7 +123,7 @@ def test_xy_without_lp_solution() -> None:
             [True, False],
         ]
     }
-    xy = PrimalSolutionComponent.xy(features, sample)
+    xy = PrimalSolutionComponent.sample_xy(features, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -169,11 +170,11 @@ def test_predict() -> None:
             }
         }
     }
-    x, _ = PrimalSolutionComponent.xy(features, sample)
+    x, _ = PrimalSolutionComponent.sample_xy(features, sample)
     comp = PrimalSolutionComponent()
     comp.classifiers = {"default": clf}
     comp.thresholds = {"default": thr}
-    solution_actual = comp.predict(features, sample)
+    solution_actual = comp.sample_predict(features, sample)
     clf.predict_proba.assert_called_once()
     assert_array_equal(x["default"], clf.predict_proba.call_args[0][0])
     thr.predict.assert_called_once()
@@ -229,3 +230,43 @@ def test_usage():
     assert stats["Primal: Free"] == 0
     assert stats["Primal: One"] + stats["Primal: Zero"] == 10
     assert stats["Lower bound"] == stats["Warm start value"]
+
+
+def test_evaluate() -> None:
+    comp = PrimalSolutionComponent()
+    comp.sample_predict = lambda _, __: {  # type: ignore
+        "x": {
+            0: 1.0,
+            1: 0.0,
+            2: 0.0,
+            3: None,
+            4: 1.0,
+        }
+    }
+    features: Features = {
+        "Variables": {
+            "x": {
+                0: {},
+                1: {},
+                2: {},
+                3: {},
+                4: {},
+            }
+        }
+    }
+    sample: TrainingSample = {
+        "Solution": {
+            "x": {
+                0: 1.0,
+                1: 1.0,
+                2: 0.0,
+                3: 1.0,
+                4: 1.0,
+            }
+        }
+    }
+    ev = comp.sample_evaluate(features, sample)
+    assert ev == {
+        0: classifier_evaluation_dict(tp=1, fp=1, tn=3, fn=0),
+        1: classifier_evaluation_dict(tp=2, fp=0, tn=1, fn=2),
+    }
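The expected counts can be checked by hand: the actual solution is x = (1, 1, 0, 1, 1) and the prediction is (1, 0, 0, None, 1). For class 1 the component predicts "one" for indices {0, 4}, giving tp=2 (both are truly one), fp=0, tn=1 (index 2), and fn=2 (indices 1 and 3). For class 0 it predicts "zero" for indices {1, 2}, giving tp=1 (index 2), fp=1 (index 1), tn=3 (indices 0, 3, 4), and fn=0.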