Update PrimalSolutionComponent

master
Alinson S. Xavier 5 years ago
parent d7aa31f3eb
commit a9dcdb8e4e

@@ -266,6 +266,13 @@ class Component(EnforceOverrides):
     ) -> Dict[Hashable, Dict[str, float]]:
         return {}
 
+    def sample_evaluate(
+        self,
+        instance: Optional[Instance],
+        sample: Sample,
+    ) -> Dict[Hashable, Dict[str, float]]:
+        return {}
+
     def sample_xy(
         self,
         instance: Optional[Instance],

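For orientation, here is a rough sketch of how the new sample_evaluate hook can be consumed, based on the signatures above and on the PrimalSolutionComponent override further down. The helper function and the Component import path are assumptions for illustration only and are not part of this commit:

    # Hypothetical helper (illustration only, not part of this commit): collect
    # per-category evaluation metrics from every component for one training sample.
    # Components that do not override sample_evaluate contribute an empty dict.
    from typing import Dict, Hashable, List

    from miplearn.components.component import Component  # assumed module path
    from miplearn.features import Sample


    def evaluate_components(
        components: List[Component],
        sample: Sample,
    ) -> Dict[str, Dict[Hashable, Dict[str, float]]]:
        return {type(c).__name__: c.sample_evaluate(None, sample) for c in components}
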
@@ -61,14 +61,13 @@ class PrimalSolutionComponent(Component):
         self.classifier_prototype = classifier
 
     @overrides
-    def before_solve_mip_old(
+    def before_solve_mip(
         self,
         solver: "LearningSolver",
         instance: Instance,
         model: Any,
         stats: LearningSolveStats,
-        features: Features,
-        training_data: TrainingSample,
+        sample: Sample,
     ) -> None:
         logger.info("Predicting primal solution...")
@@ -78,7 +77,7 @@ class PrimalSolutionComponent(Component):
             return
 
         # Predict solution and provide it to the solver
-        solution = self.sample_predict(instance, training_data)
+        solution = self.sample_predict(sample)
         assert solver.internal_solver is not None
         if self.mode == "heuristic":
             solver.internal_solver.fix(solution)
@@ -103,15 +102,12 @@ class PrimalSolutionComponent(Component):
             f"one: {stats['Primal: One']}"
         )
 
-    def sample_predict(
-        self,
-        instance: Instance,
-        sample: TrainingSample,
-    ) -> Solution:
-        assert instance.features.variables is not None
+    def sample_predict(self, sample: Sample) -> Solution:
+        assert sample.after_load is not None
+        assert sample.after_load.variables is not None
 
         # Compute y_pred
-        x, _ = self.sample_xy_old(instance, sample)
+        x, _ = self.sample_xy(None, sample)
         y_pred = {}
         for category in x.keys():
             assert category in self.classifiers, (
@@ -129,9 +125,9 @@ class PrimalSolutionComponent(Component):
             ).T
 
         # Convert y_pred into solution
-        solution: Solution = {v: None for v in instance.features.variables.keys()}
+        solution: Solution = {v: None for v in sample.after_load.variables.keys()}
         category_offset: Dict[Hashable, int] = {cat: 0 for cat in x.keys()}
-        for (var_name, var_features) in instance.features.variables.items():
+        for (var_name, var_features) in sample.after_load.variables.items():
             category = var_features.category
             if category not in category_offset:
                 continue
@@ -144,42 +140,6 @@ class PrimalSolutionComponent(Component):
         return solution
 
-    @overrides
-    def sample_xy_old(
-        self,
-        instance: Instance,
-        sample: TrainingSample,
-    ) -> Tuple[Dict[Category, List[List[float]]], Dict[Category, List[List[float]]]]:
-        assert instance.features.variables is not None
-        x: Dict = {}
-        y: Dict = {}
-        for (var_name, var_features) in instance.features.variables.items():
-            category = var_features.category
-            if category is None:
-                continue
-            if category not in x.keys():
-                x[category] = []
-                y[category] = []
-            f: List[float] = []
-            assert var_features.user_features is not None
-            f += var_features.user_features
-            if sample.lp_solution is not None:
-                lp_value = sample.lp_solution[var_name]
-                if lp_value is not None:
-                    f += [lp_value]
-            x[category] += [f]
-            if sample.solution is not None:
-                opt_value = sample.solution[var_name]
-                assert opt_value is not None
-                assert 0.0 - 1e-5 <= opt_value <= 1.0 + 1e-5, (
-                    f"Variable {var_name} has non-binary value {opt_value} in the "
-                    "optimal solution. Predicting values of non-binary "
-                    "variables is not currently supported. Please set its "
-                    "category to None."
-                )
-                y[category] += [[opt_value < 0.5, opt_value >= 0.5]]
-        return x, y
-
     @overrides
     def sample_xy(
         self,
@@ -226,18 +186,21 @@ class PrimalSolutionComponent(Component):
         return x, y
 
     @overrides
-    def sample_evaluate_old(
+    def sample_evaluate(
         self,
-        instance: Instance,
-        sample: TrainingSample,
+        _: Optional[Instance],
+        sample: Sample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        solution_actual = sample.solution
-        assert solution_actual is not None
-        solution_pred = self.sample_predict(instance, sample)
+        assert sample.after_mip is not None
+        assert sample.after_mip.variables is not None
+        solution_actual = sample.after_mip.variables
+        solution_pred = self.sample_predict(sample)
+
         vars_all, vars_one, vars_zero = set(), set(), set()
         pred_one_positive, pred_zero_positive = set(), set()
-        for (var_name, value_actual) in solution_actual.items():
-            assert value_actual is not None
+        for (var_name, var) in solution_actual.items():
+            assert var.value is not None
+            value_actual = var.value
             vars_all.add(var_name)
             if value_actual > 0.5:
                 vars_one.add(var_name)
@@ -279,10 +242,3 @@ class PrimalSolutionComponent(Component):
             thr.fit(clf, x[category], y[category])
             self.classifiers[category] = clf
             self.thresholds[category] = thr
-
-    @overrides
-    def fit(
-        self,
-        training_instances: List[Instance],
-    ) -> None:
-        return

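Taken together, the primal component now reads everything it needs from a single Sample object (after_load for variable features, after_mip for the optimal values) instead of an Instance plus TrainingSample. A minimal usage sketch under that assumption; x, y and sample are placeholders, and fit_xy is the pre-existing per-category training entry point exercised by test_fit_xy below:

    # Illustration only (fragment), not part of this commit.
    from miplearn.components.primal import PrimalSolutionComponent

    comp = PrimalSolutionComponent()
    comp.fit_xy(x, y)                             # x, y as produced by sample_xy
    solution = comp.sample_predict(sample)        # {var_name: 0.0, 1.0 or None}
    metrics = comp.sample_evaluate(None, sample)  # per-label classifier_evaluation_dict metrics
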
@@ -1,7 +1,6 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from typing import cast
 from unittest.mock import Mock
 
 import numpy as np
@@ -14,15 +13,14 @@ from miplearn.classifiers.threshold import Threshold
 from miplearn.components import classifier_evaluation_dict
 from miplearn.components.primal import PrimalSolutionComponent
 from miplearn.features import (
-    TrainingSample,
     Variable,
     Features,
     Sample,
     InstanceFeatures,
 )
-from miplearn.instance.base import Instance
 from miplearn.problems.tsp import TravelingSalesmanGenerator
 from miplearn.solvers.learning import LearningSolver
+from miplearn.solvers.tests import assert_equals
 
 
 @pytest.fixture
@@ -48,7 +46,7 @@ def sample() -> Sample:
         after_mip=Features(
             variables={
                 "x[0]": Variable(value=0.0),
-                "x[1]": Variable(value=0.0),
+                "x[1]": Variable(value=1.0),
                 "x[2]": Variable(value=1.0),
                 "x[3]": Variable(value=0.0),
             }
@@ -89,168 +87,6 @@ def test_xy(sample: Sample) -> None:
     assert y_actual == y_expected
 
 
-def test_xy_old() -> None:
-    features = Features(
-        variables={
-            "x[0]": Variable(
-                category="default",
-                user_features=[0.0, 0.0],
-            ),
-            "x[1]": Variable(
-                category=None,
-            ),
-            "x[2]": Variable(
-                category="default",
-                user_features=[1.0, 0.0],
-            ),
-            "x[3]": Variable(
-                category="default",
-                user_features=[1.0, 1.0],
-            ),
-        }
-    )
-    instance = Mock(spec=Instance)
-    instance.features = features
-    sample = TrainingSample(
-        solution={
-            "x[0]": 0.0,
-            "x[1]": 1.0,
-            "x[2]": 1.0,
-            "x[3]": 0.0,
-        },
-        lp_solution={
-            "x[0]": 0.1,
-            "x[1]": 0.1,
-            "x[2]": 0.1,
-            "x[3]": 0.1,
-        },
-    )
-    x_expected = {
-        "default": [
-            [0.0, 0.0, 0.1],
-            [1.0, 0.0, 0.1],
-            [1.0, 1.0, 0.1],
-        ]
-    }
-    y_expected = {
-        "default": [
-            [True, False],
-            [False, True],
-            [True, False],
-        ]
-    }
-    xy = PrimalSolutionComponent().sample_xy_old(instance, sample)
-    assert xy is not None
-    x_actual, y_actual = xy
-    assert x_actual == x_expected
-    assert y_actual == y_expected
-
-
-def test_xy_without_lp_solution_old() -> None:
-    features = Features(
-        variables={
-            "x[0]": Variable(
-                category="default",
-                user_features=[0.0, 0.0],
-            ),
-            "x[1]": Variable(
-                category=None,
-            ),
-            "x[2]": Variable(
-                category="default",
-                user_features=[1.0, 0.0],
-            ),
-            "x[3]": Variable(
-                category="default",
-                user_features=[1.0, 1.0],
-            ),
-        }
-    )
-    instance = Mock(spec=Instance)
-    instance.features = features
-    sample = TrainingSample(
-        solution={
-            "x[0]": 0.0,
-            "x[1]": 1.0,
-            "x[2]": 1.0,
-            "x[3]": 0.0,
-        },
-    )
-    x_expected = {
-        "default": [
-            [0.0, 0.0],
-            [1.0, 0.0],
-            [1.0, 1.0],
-        ]
-    }
-    y_expected = {
-        "default": [
-            [True, False],
-            [False, True],
-            [True, False],
-        ]
-    }
-    xy = PrimalSolutionComponent().sample_xy_old(instance, sample)
-    assert xy is not None
-    x_actual, y_actual = xy
-    assert x_actual == x_expected
-    assert y_actual == y_expected
-
-
-def test_predict_old() -> None:
-    clf = Mock(spec=Classifier)
-    clf.predict_proba = Mock(
-        return_value=np.array(
-            [
-                [0.9, 0.1],
-                [0.5, 0.5],
-                [0.1, 0.9],
-            ]
-        )
-    )
-    thr = Mock(spec=Threshold)
-    thr.predict = Mock(return_value=[0.75, 0.75])
-    features = Features(
-        variables={
-            "x[0]": Variable(
-                category="default",
-                user_features=[0.0, 0.0],
-            ),
-            "x[1]": Variable(
-                category="default",
-                user_features=[0.0, 2.0],
-            ),
-            "x[2]": Variable(
-                category="default",
-                user_features=[2.0, 0.0],
-            ),
-        }
-    )
-    instance = Mock(spec=Instance)
-    instance.features = features
-    sample = TrainingSample(
-        lp_solution={
-            "x[0]": 0.1,
-            "x[1]": 0.5,
-            "x[2]": 0.9,
-        }
-    )
-    x, _ = PrimalSolutionComponent().sample_xy_old(instance, sample)
-    comp = PrimalSolutionComponent()
-    comp.classifiers = {"default": clf}
-    comp.thresholds = {"default": thr}
-    pred = comp.sample_predict(instance, sample)
-    clf.predict_proba.assert_called_once()
-    assert_array_equal(x["default"], clf.predict_proba.call_args[0][0])
-    thr.predict.assert_called_once()
-    assert_array_equal(x["default"], thr.predict.call_args[0][0])
-    assert pred == {
-        "x[0]": 0.0,
-        "x[1]": None,
-        "x[2]": 1.0,
-    }
-
-
 def test_fit_xy() -> None:
     clf = Mock(spec=Classifier)
     clf.clone = lambda: Mock(spec=Classifier)  # type: ignore
@@ -295,37 +131,49 @@ def test_usage() -> None:
     assert stats["mip_lower_bound"] == stats["mip_warm_start_value"]
 
 
-def test_evaluate_old() -> None:
+def test_evaluate(sample: Sample) -> None:
     comp = PrimalSolutionComponent()
-    comp.sample_predict = lambda _, __: {  # type: ignore
+    comp.sample_predict = lambda _: {  # type: ignore
         "x[0]": 1.0,
-        "x[1]": 0.0,
+        "x[1]": 1.0,
         "x[2]": 0.0,
         "x[3]": None,
-        "x[4]": 1.0,
     }
-    features: Features = Features(
-        variables={
-            "x[0]": Variable(),
-            "x[1]": Variable(),
-            "x[2]": Variable(),
-            "x[3]": Variable(),
-            "x[4]": Variable(),
-        }
-    )
-    instance = Mock(spec=Instance)
-    instance.features = features
-    sample: TrainingSample = TrainingSample(
-        solution={
-            "x[0]": 1.0,
-            "x[1]": 1.0,
-            "x[2]": 0.0,
-            "x[3]": 1.0,
-            "x[4]": 1.0,
-        }
-    )
-    ev = comp.sample_evaluate_old(instance, sample)
-    assert ev == {
-        0: classifier_evaluation_dict(tp=1, fp=1, tn=3, fn=0),
-        1: classifier_evaluation_dict(tp=2, fp=0, tn=1, fn=2),
+    ev = comp.sample_evaluate(None, sample)
+    assert_equals(
+        ev,
+        {
+            0: classifier_evaluation_dict(tp=0, fp=1, tn=1, fn=2),
+            1: classifier_evaluation_dict(tp=1, fp=1, tn=1, fn=1),
+        },
+    )
+
+
+def test_predict(sample: Sample) -> None:
+    clf = Mock(spec=Classifier)
+    clf.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [0.9, 0.1],
+                [0.5, 0.5],
+                [0.1, 0.9],
+            ]
+        )
+    )
+    thr = Mock(spec=Threshold)
+    thr.predict = Mock(return_value=[0.75, 0.75])
+    comp = PrimalSolutionComponent()
+    x, _ = comp.sample_xy(None, sample)
+    comp.classifiers = {"default": clf}
+    comp.thresholds = {"default": thr}
+    pred = comp.sample_predict(sample)
+    clf.predict_proba.assert_called_once()
+    thr.predict.assert_called_once()
+    assert_array_equal(x["default"], clf.predict_proba.call_args[0][0])
+    assert_array_equal(x["default"], thr.predict.call_args[0][0])
+    assert pred == {
+        "x[0]": 0.0,
+        "x[1]": None,
+        "x[2]": None,
+        "x[3]": 1.0,
     }
