Update DynamicLazyConstraintsComponent

Branch: master
Alinson S. Xavier committed 5 years ago
parent b5411b8950
commit a4433916e5

@@ -196,7 +196,7 @@ class Component(EnforceOverrides):
     ) -> None:
         x, y = self.xy_instances(training_instances)
         for cat in x.keys():
-            x[cat] = np.array(x[cat])
+            x[cat] = np.array(x[cat], dtype=np.float32)
             y[cat] = np.array(y[cat])
         self.fit_xy(x, y)
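Why `dtype=np.float32` here: without it, `np.array` infers `float64` (doubling memory) and, if any entry is non-numeric, silently builds an `object` array whose failure surfaces only later inside the classifier. A standalone sketch (plain NumPy, not MIPLearn code):

    import numpy as np

    x = [[1.0, 2.0], [3.0, 4.0]]
    a = np.array(x)                    # dtype inferred as float64
    b = np.array(x, dtype=np.float32)  # half the memory, same values
    assert a.dtype == np.float64 and b.dtype == np.float32

    # A non-numeric entry now raises immediately, instead of producing
    # an object array that fails later inside the classifier:
    try:
        np.array([[1.0, "oops"]], dtype=np.float32)
    except ValueError:
        pass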

@@ -105,7 +105,10 @@ class DynamicConstraintsComponent(Component):
                 features.extend(sample.after_lp.instance.to_list())
             features.extend(instance.get_constraint_features(cid))
             for ci in features:
-                assert isinstance(ci, float)
+                assert isinstance(ci, float), (
+                    f"Constraint features must be a list of floats. "
+                    f"Found {ci.__class__.__name__} instead."
+                )
             x[category].append(features)
             cids[category].append(cid)
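The richer assertion message makes type errors in user-supplied `get_constraint_features` implementations self-explanatory. A standalone reproduction of the new behavior:

    features = [1.0, 2, 3.0]  # the int 2 should be rejected
    try:
        for ci in features:
            assert isinstance(ci, float), (
                f"Constraint features must be a list of floats. "
                f"Found {ci.__class__.__name__} instead."
            )
    except AssertionError as e:
        print(e)  # Constraint features must be a list of floats. Found int instead.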
@@ -137,7 +140,7 @@ class DynamicConstraintsComponent(Component):
         x, y, _ = self.sample_xy_with_cids(instance, sample)
         return x, y

-    def sample_predict(
+    def sample_predict_old(
         self,
         instance: Instance,
         sample: TrainingSample,
@@ -160,6 +163,29 @@ class DynamicConstraintsComponent(Component):
                 pred += [cids[category][i]]
         return pred

+    def sample_predict(
+        self,
+        instance: Instance,
+        sample: Sample,
+    ) -> List[Hashable]:
+        pred: List[Hashable] = []
+        if len(self.known_cids) == 0:
+            logger.info("Classifiers not fitted. Skipping.")
+            return pred
+        x, _, cids = self.sample_xy_with_cids(instance, sample)
+        for category in x.keys():
+            assert category in self.classifiers
+            assert category in self.thresholds
+            clf = self.classifiers[category]
+            thr = self.thresholds[category]
+            nx = np.array(x[category])
+            proba = clf.predict_proba(nx)
+            t = thr.predict(nx)
+            for i in range(proba.shape[0]):
+                if proba[i][1] > t[1]:
+                    pred += [cids[category][i]]
+        return pred
+
     @overrides
     def fit_old(self, training_instances: List[Instance]) -> None:
         collected_cids = set()
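The new `sample_predict` keeps a constraint id only when the classifier's class-1 probability clears the per-category threshold. A minimal sketch of that decision rule, with hand-made arrays standing in for `clf.predict_proba(nx)` and `thr.predict(nx)`:

    import numpy as np

    proba = np.array([[0.1, 0.9], [0.8, 0.2]])  # stand-in for clf.predict_proba(nx)
    t = [0.5, 0.5]                              # stand-in for thr.predict(nx)
    cids = ["c1", "c2"]                         # one cid per feature row

    pred = [cids[i] for i in range(proba.shape[0]) if proba[i][1] > t[1]]
    assert pred == ["c1"]  # only c1 clears the class-1 threshold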
@@ -174,6 +200,24 @@ class DynamicConstraintsComponent(Component):
         self.known_cids.extend(sorted(collected_cids))
         super().fit_old(training_instances)

+    @overrides
+    def fit(self, training_instances: List[Instance]) -> None:
+        collected_cids = set()
+        for instance in training_instances:
+            instance.load()
+            for sample in instance.samples:
+                if (
+                    sample.after_mip is None
+                    or sample.after_mip.extra is None
+                    or sample.after_mip.extra[self.attr] is None
+                ):
+                    continue
+                collected_cids |= sample.after_mip.extra[self.attr]
+            instance.free()
+        self.known_cids.clear()
+        self.known_cids.extend(sorted(collected_cids))
+        super().fit(training_instances)
+
     @overrides
     def fit_xy(
         self,
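The new `fit` gathers known constraint ids from `sample.after_mip.extra` instead of the old `TrainingSample` attribute. A simplified sketch of the accumulation, with plain dicts standing in for `Sample` objects:

    samples_extra = [
        {"lazy_enforced": {"c1", "c2"}},
        {"lazy_enforced": {"c2", "c3"}},
    ]
    collected = set()
    for extra in samples_extra:
        if extra.get("lazy_enforced") is None:
            continue                      # skip samples with no recorded violations
        collected |= extra["lazy_enforced"]
    assert sorted(collected) == ["c1", "c2", "c3"]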
@@ -189,12 +233,15 @@ class DynamicConstraintsComponent(Component):
         self.thresholds[category].fit(self.classifiers[category], npx, npy)

     @overrides
-    def sample_evaluate_old(
+    def sample_evaluate(
         self,
         instance: Instance,
-        sample: TrainingSample,
+        sample: Sample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        assert getattr(sample, self.attr) is not None
+        assert sample.after_mip is not None
+        assert sample.after_mip.extra is not None
+        assert self.attr in sample.after_mip.extra
+        actual = sample.after_mip.extra[self.attr]
         pred = set(self.sample_predict(instance, sample))
         tp: Dict[Hashable, int] = {}
         tn: Dict[Hashable, int] = {}
@@ -210,12 +257,12 @@ class DynamicConstraintsComponent(Component):
                 fp[category] = 0
                 fn[category] = 0
             if cid in pred:
-                if cid in getattr(sample, self.attr):
+                if cid in actual:
                     tp[category] += 1
                 else:
                     fp[category] += 1
             else:
-                if cid in getattr(sample, self.attr):
+                if cid in actual:
                     fn[category] += 1
                 else:
                     tn[category] += 1
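The rewritten `sample_evaluate` compares predicted cids against the set actually enforced in the sample and tallies confusion counts per constraint category. A worked miniature that mirrors the counting loop, using the same cids and expected counts as the tests below:

    known = {"c1": "type-a", "c2": "type-a", "c3": "type-b", "c4": "type-b"}
    actual = {"c1", "c2"}   # enforced in the sample
    pred = {"c1", "c4"}     # returned by sample_predict
    counts = {cat: dict(tp=0, tn=0, fp=0, fn=0) for cat in ("type-a", "type-b")}
    for cid, cat in known.items():
        if cid in pred:
            counts[cat]["tp" if cid in actual else "fp"] += 1
        else:
            counts[cat]["fn" if cid in actual else "tn"] += 1
    assert counts["type-a"] == dict(tp=1, tn=0, fp=0, fn=1)
    assert counts["type-b"] == dict(tp=0, tn=1, fp=1, fn=0)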

@ -3,7 +3,7 @@
# Released under the modified BSD license. See COPYING.md for more details. # Released under the modified BSD license. See COPYING.md for more details.
import logging import logging
from typing import Dict, List, TYPE_CHECKING, Hashable, Tuple, Any, Optional from typing import Dict, List, TYPE_CHECKING, Hashable, Tuple, Any, Optional, Set
import numpy as np import numpy as np
from overrides import overrides from overrides import overrides
@@ -41,6 +41,7 @@ class DynamicLazyConstraintsComponent(Component):
         self.classifiers = self.dynamic.classifiers
         self.thresholds = self.dynamic.thresholds
         self.known_cids = self.dynamic.known_cids
+        self.lazy_enforced: Set[str] = set()

     @staticmethod
     def enforce(
@@ -54,21 +55,33 @@ class DynamicLazyConstraintsComponent(Component):
             instance.enforce_lazy_constraint(solver.internal_solver, model, cid)

     @overrides
-    def before_solve_mip_old(
+    def before_solve_mip(
         self,
         solver: "LearningSolver",
         instance: Instance,
         model: Any,
         stats: LearningSolveStats,
-        features: Features,
-        training_data: TrainingSample,
+        sample: Sample,
     ) -> None:
-        training_data.lazy_enforced = set()
+        self.lazy_enforced.clear()
         logger.info("Predicting violated (dynamic) lazy constraints...")
-        cids = self.dynamic.sample_predict(instance, training_data)
+        cids = self.dynamic.sample_predict(instance, sample)
         logger.info("Enforcing %d lazy constraints..." % len(cids))
         self.enforce(cids, instance, model, solver)

+    @overrides
+    def after_solve_mip(
+        self,
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+        stats: LearningSolveStats,
+        sample: Sample,
+    ) -> None:
+        assert sample.after_mip is not None
+        assert sample.after_mip.extra is not None
+        sample.after_mip.extra["lazy_enforced"] = set(self.lazy_enforced)
+
     @overrides
     def iteration_cb(
         self,
@@ -83,23 +96,13 @@ class DynamicLazyConstraintsComponent(Component):
             logger.debug("No violations found")
             return False
         else:
-            sample = instance.training_data[-1]
-            assert sample.lazy_enforced is not None
-            sample.lazy_enforced |= set(cids)
+            self.lazy_enforced |= set(cids)
             logger.debug(" %d violations found" % len(cids))
             self.enforce(cids, instance, model, solver)
             return True

     # Delegate ML methods to self.dynamic
     # -------------------------------------------------------------------
-    @overrides
-    def sample_xy_old(
-        self,
-        instance: Instance,
-        sample: TrainingSample,
-    ) -> Tuple[Dict, Dict]:
-        return self.dynamic.sample_xy_old(instance, sample)
-
     @overrides
     def sample_xy(
         self,
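Taken together, the three hooks above replace the old per-`TrainingSample` bookkeeping with component-level state: `before_solve_mip` clears `self.lazy_enforced`, `iteration_cb` accumulates each batch of violated cids, and `after_solve_mip` persists a copy into `sample.after_mip.extra`. A condensed, illustrative view (not the real signatures):

    class LifecycleSketch:
        """Illustrative only; mirrors DynamicLazyConstraintsComponent."""

        def __init__(self) -> None:
            self.lazy_enforced: set = set()

        def before_solve_mip(self) -> None:
            self.lazy_enforced.clear()           # fresh set per solve

        def iteration_cb(self, violated_cids: set) -> None:
            self.lazy_enforced |= violated_cids  # accumulate across callbacks

        def after_solve_mip(self, extra: dict) -> None:
            extra["lazy_enforced"] = set(self.lazy_enforced)  # persist a copy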
@@ -111,13 +114,13 @@ class DynamicLazyConstraintsComponent(Component):
     def sample_predict(
         self,
         instance: Instance,
-        sample: TrainingSample,
+        sample: Sample,
     ) -> List[Hashable]:
         return self.dynamic.sample_predict(instance, sample)

     @overrides
-    def fit_old(self, training_instances: List[Instance]) -> None:
-        self.dynamic.fit_old(training_instances)
+    def fit(self, training_instances: List[Instance]) -> None:
+        self.dynamic.fit(training_instances)

     @overrides
     def fit_xy(
@@ -128,9 +131,9 @@ class DynamicLazyConstraintsComponent(Component):
         self.dynamic.fit_xy(x, y)

     @overrides
-    def sample_evaluate_old(
+    def sample_evaluate(
         self,
         instance: Instance,
-        sample: TrainingSample,
+        sample: Sample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        return self.dynamic.sample_evaluate_old(instance, sample)
+        return self.dynamic.sample_evaluate(instance, sample)

@@ -51,7 +51,7 @@ class UserCutsComponent(Component):
         self.enforced.clear()
         self.n_added_in_callback = 0
         logger.info("Predicting violated user cuts...")
-        cids = self.dynamic.sample_predict(instance, training_data)
+        cids = self.dynamic.sample_predict_old(instance, training_data)
         logger.info("Enforcing %d user cuts ahead-of-time..." % len(cids))
         for cid in cids:
             instance.enforce_user_cut(solver.internal_solver, model, cid)

@@ -62,9 +62,9 @@ class Instance(ABC, EnforceOverrides):
         the problem. If two instances map into arrays of different lengths,
         they cannot be solved by the same LearningSolver object.

-        By default, returns [0].
+        By default, returns [0.0].
         """
-        return [0]
+        return [0.0]

     def get_variable_features(self, var_name: VariableName) -> List[float]:
         """
@@ -81,9 +81,9 @@ class Instance(ABC, EnforceOverrides):
         length for all variables within the same category, for all relevant instances
         of the problem.

-        By default, returns [0].
+        By default, returns [0.0].
         """
-        return [0]
+        return [0.0]

     def get_variable_category(self, var_name: VariableName) -> Optional[Category]:
         """

@@ -159,6 +159,7 @@ class LearningSolver:
             # -------------------------------------------------------
             logger.info("Extracting features (after-load)...")
             features = FeaturesExtractor(self.internal_solver).extract(instance)
+            features.extra = {}
             instance.features.__dict__ = features.__dict__
             sample.after_load = features
@@ -204,6 +205,7 @@ class LearningSolver:
             # -------------------------------------------------------
             logger.info("Extracting features (after-lp)...")
             features = FeaturesExtractor(self.internal_solver).extract(instance)
+            features.extra = {}
             features.lp_solve = lp_stats
             sample.after_lp = features
@@ -267,6 +269,7 @@ class LearningSolver:
             logger.info("Extracting features (after-mip)...")
             features = FeaturesExtractor(self.internal_solver).extract(instance)
             features.mip_solve = mip_stats
+            features.extra = {}
             sample.after_mip = features

             # Add some information to training_sample
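The three `features.extra = {}` insertions guarantee that every snapshot (after-load, after-lp, after-mip) carries a writable dict, so components like `after_solve_mip` above can store data without None checks. A hedged sketch of the pattern, using a stand-in dataclass rather than miplearn's actual `Features`:

    from dataclasses import dataclass
    from typing import Any, Dict, Optional

    @dataclass
    class FeaturesSketch:               # stand-in for miplearn's Features
        extra: Optional[Dict[str, Any]] = None

    f = FeaturesSketch()
    f.extra = {}                        # initialized right after extraction
    f.extra["lazy_enforced"] = {"c1"}   # components can now write safely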

@@ -83,15 +83,20 @@ def training_instances() -> List[Instance]:
     instances = [cast(Instance, Mock(spec=Instance)) for _ in range(2)]
     instances[0].samples = [
         Sample(
-            after_lp=Features(
-                instance=InstanceFeatures(),
-            ),
+            after_lp=Features(instance=InstanceFeatures()),
             after_mip=Features(extra={"lazy_enforced": {"c1", "c2"}}),
-        )
+        ),
+        Sample(
+            after_lp=Features(instance=InstanceFeatures()),
+            after_mip=Features(extra={"lazy_enforced": {"c2", "c3"}}),
+        ),
     ]
     instances[0].samples[0].after_lp.instance.to_list = Mock(  # type: ignore
         return_value=[5.0]
     )
+    instances[0].samples[1].after_lp.instance.to_list = Mock(  # type: ignore
+        return_value=[5.0]
+    )
     instances[0].get_constraint_category = Mock(  # type: ignore
         side_effect=lambda cid: {
             "c1": "type-a",
@@ -108,7 +113,30 @@ def training_instances() -> List[Instance]:
             "c4": [3.0, 4.0],
         }[cid]
     )
+    instances[1].samples = [
+        Sample(
+            after_lp=Features(instance=InstanceFeatures()),
+            after_mip=Features(extra={"lazy_enforced": {"c3", "c4"}}),
+        )
+    ]
+    instances[1].samples[0].after_lp.instance.to_list = Mock(  # type: ignore
+        return_value=[8.0]
+    )
+    instances[1].get_constraint_category = Mock(  # type: ignore
+        side_effect=lambda cid: {
+            "c1": None,
+            "c2": "type-a",
+            "c3": "type-b",
+            "c4": "type-b",
+        }[cid]
+    )
+    instances[1].get_constraint_features = Mock(  # type: ignore
+        side_effect=lambda cid: {
+            "c2": [7.0, 8.0, 9.0],
+            "c3": [5.0, 6.0],
+            "c4": [7.0, 8.0],
+        }[cid]
+    )
     return instances
@@ -131,11 +159,11 @@ def test_sample_xy(training_instances: List[Instance]) -> None:
     assert_equals(y_actual, y_expected)

-def test_fit_old(training_instances_old: List[Instance]) -> None:
+def test_fit(training_instances: List[Instance]) -> None:
     clf = Mock(spec=Classifier)
     clf.clone = Mock(side_effect=lambda: Mock(spec=Classifier))
     comp = DynamicLazyConstraintsComponent(classifier=clf)
-    comp.fit_old(training_instances_old)
+    comp.fit(training_instances)
     assert clf.clone.call_count == 2
     assert "type-a" in comp.classifiers
@@ -145,11 +173,11 @@ def test_fit_old(training_instances_old: List[Instance]) -> None:
         clf_a.fit.call_args[0][0],  # type: ignore
         np.array(
             [
-                [50.0, 1.0, 2.0, 3.0],
-                [50.0, 4.0, 5.0, 6.0],
-                [50.0, 1.0, 2.0, 3.0],
-                [50.0, 4.0, 5.0, 6.0],
-                [80.0, 7.0, 8.0, 9.0],
+                [5.0, 1.0, 2.0, 3.0],
+                [5.0, 4.0, 5.0, 6.0],
+                [5.0, 1.0, 2.0, 3.0],
+                [5.0, 4.0, 5.0, 6.0],
+                [8.0, 7.0, 8.0, 9.0],
             ]
         ),
     )
@@ -173,12 +201,12 @@ def test_fit_old(training_instances_old: List[Instance]) -> None:
         clf_b.fit.call_args[0][0],  # type: ignore
         np.array(
             [
-                [50.0, 1.0, 2.0],
-                [50.0, 3.0, 4.0],
-                [50.0, 1.0, 2.0],
-                [50.0, 3.0, 4.0],
-                [80.0, 5.0, 6.0],
-                [80.0, 7.0, 8.0],
+                [5.0, 1.0, 2.0],
+                [5.0, 3.0, 4.0],
+                [5.0, 1.0, 2.0],
+                [5.0, 3.0, 4.0],
+                [8.0, 5.0, 6.0],
+                [8.0, 7.0, 8.0],
             ]
         ),
     )
@@ -197,7 +225,7 @@ def test_fit_old(training_instances_old: List[Instance]) -> None:
     )

-def test_sample_predict_evaluate_old(training_instances_old: List[Instance]) -> None:
+def test_sample_predict_evaluate(training_instances: List[Instance]) -> None:
     comp = DynamicLazyConstraintsComponent()
     comp.known_cids.extend(["c1", "c2", "c3", "c4"])
     comp.thresholds["type-a"] = MinProbabilityThreshold([0.5, 0.5])
@@ -211,15 +239,14 @@ def test_sample_predict_evaluate_old(training_instances_old: List[Instance]) -> None:
         side_effect=lambda _: np.array([[0.9, 0.1], [0.1, 0.9]])
     )
     pred = comp.sample_predict(
-        training_instances_old[0],
-        training_instances_old[0].training_data[0],
+        training_instances[0],
+        training_instances[0].samples[0],
     )
     assert pred == ["c1", "c4"]
-    ev = comp.sample_evaluate_old(
-        training_instances_old[0],
-        training_instances_old[0].training_data[0],
+    ev = comp.sample_evaluate(
+        training_instances[0],
+        training_instances[0].samples[0],
     )
-    print(ev)
     assert ev == {
         "type-a": classifier_evaluation_dict(tp=1, fp=0, tn=0, fn=1),
         "type-b": classifier_evaluation_dict(tp=0, fp=1, tn=1, fn=0),

@@ -67,8 +67,9 @@ def test_subtour() -> None:
         instance = TravelingSalesmanInstance(n_cities, distances)
         solver = LearningSolver()
         solver.solve(instance)
-        assert instance.training_data[0].lazy_enforced is not None
-        assert len(instance.training_data[0].lazy_enforced) > 0
+        lazy_enforced = instance.samples[0].after_mip.extra["lazy_enforced"]
+        assert lazy_enforced is not None
+        assert len(lazy_enforced) > 0
         solution = instance.training_data[0].solution
         assert solution is not None
         assert solution["x[(0, 1)]"] == 1.0
