Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)
Rename methods that use TrainingSample to _old
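The renamed callbacks keep their bodies; only the method names (and the call sites inside LearningSolver) gain an `_old` suffix, presumably to free the original names for a replacement API that no longer takes a TrainingSample. A minimal sketch of the pattern, using simplified stand-ins rather than the actual MIPLearn signatures:

from typing import Any, List


class Component:
    # Formerly named before_solve_mip; only the name changed, the body and
    # the (solver, instance, model) arguments stay the same.
    def before_solve_mip_old(self, solver: Any, instance: Any, model: Any) -> None:
        return


class LearningSolver:
    def __init__(self, components: List[Component]) -> None:
        self.components = components

    def solve(self, instance: Any, model: Any) -> None:
        # Call sites are renamed in lock-step with the methods they invoke.
        callback_args_old = (self, instance, model)
        for component in self.components:
            component.before_solve_mip_old(*callback_args_old)
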
@@ -25,7 +25,7 @@ class Component:
     strategy.
     """

-    def before_solve_lp(
+    def before_solve_lp_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -62,7 +62,7 @@ class Component:
         """
         return

-    def after_solve_lp(
+    def after_solve_lp_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -77,7 +77,7 @@ class Component:
         """
         return

-    def before_solve_mip(
+    def before_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -92,7 +92,7 @@ class Component:
         """
         return

-    def after_solve_mip(
+    def after_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -107,7 +107,7 @@ class Component:
         """
         return

-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: Instance,
         sample: TrainingSample,
@@ -128,7 +128,7 @@ class Component:
         for instance in instances:
             instance.load()
             for sample in instance.training_data:
-                xy = self.sample_xy(instance, sample)
+                xy = self.sample_xy_old(instance, sample)
                 if xy is None:
                     continue
                 x_sample, y_sample = xy
@@ -213,11 +213,11 @@ class Component:
         for instance in instances:
             instance.load()
             for sample in instance.training_data:
-                ev += [self.sample_evaluate(instance, sample)]
+                ev += [self.sample_evaluate_old(instance, sample)]
             instance.free()
         return ev

-    def sample_evaluate(
+    def sample_evaluate_old(
         self,
         instance: Instance,
         sample: TrainingSample,

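The two loops above appear to be the generic training and evaluation drivers in the Component base class: they walk every TrainingSample of every instance and hand each pair to the (now renamed) `sample_xy_old` / `sample_evaluate_old` hooks. A rough, hypothetical free-function sketch of the xy-collection loop from the @@ -128 hunk; the real method lives on Component, and the merge step shown here is assumed, not taken from the diff:

from typing import Dict, Tuple


def collect_xy_old(component, instances) -> Tuple[Dict, Dict]:
    # Hypothetical illustration of the loop shown in the @@ -128 hunk.
    x: Dict = {}
    y: Dict = {}
    for instance in instances:
        instance.load()
        for sample in instance.training_data:
            xy = component.sample_xy_old(instance, sample)
            if xy is None:
                continue
            x_sample, y_sample = xy
            # Merge the per-sample dictionaries into the aggregate x and y
            # (assumed behaviour; the exact merge is outside the hunk).
            for category, rows in x_sample.items():
                x.setdefault(category, []).extend(rows)
            for category, rows in y_sample.items():
                y.setdefault(category, []).extend(rows)
        instance.free()
    return x, y
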
@@ -79,7 +79,7 @@ class DynamicConstraintsComponent(Component):
         return x, y, cids

     @overrides
-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: "Instance",
         sample: TrainingSample,
@@ -139,7 +139,7 @@ class DynamicConstraintsComponent(Component):
             self.thresholds[category].fit(self.classifiers[category], npx, npy)

     @overrides
-    def sample_evaluate(
+    def sample_evaluate_old(
         self,
         instance: "Instance",
         sample: TrainingSample,

@@ -54,7 +54,7 @@ class DynamicLazyConstraintsComponent(Component):
             instance.enforce_lazy_constraint(solver.internal_solver, model, cid)

     @overrides
-    def before_solve_mip(
+    def before_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -93,12 +93,12 @@ class DynamicLazyConstraintsComponent(Component):
     # Delegate ML methods to self.dynamic
     # -------------------------------------------------------------------
     @overrides
-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: "Instance",
         sample: TrainingSample,
     ) -> Tuple[Dict, Dict]:
-        return self.dynamic.sample_xy(instance, sample)
+        return self.dynamic.sample_xy_old(instance, sample)

     def sample_predict(
         self,
@@ -120,9 +120,9 @@ class DynamicLazyConstraintsComponent(Component):
         self.dynamic.fit_xy(x, y)

     @overrides
-    def sample_evaluate(
+    def sample_evaluate_old(
         self,
         instance: "Instance",
         sample: TrainingSample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        return self.dynamic.sample_evaluate(instance, sample)
+        return self.dynamic.sample_evaluate_old(instance, sample)

@@ -37,7 +37,7 @@ class UserCutsComponent(Component):
         self.n_added_in_callback = 0

     @overrides
-    def before_solve_mip(
+    def before_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: "Instance",
@@ -79,7 +79,7 @@ class UserCutsComponent(Component):
         logger.debug(f"Added {len(cids)} violated user cuts")

     @overrides
-    def after_solve_mip(
+    def after_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: "Instance",
@@ -96,12 +96,12 @@ class UserCutsComponent(Component):
     # Delegate ML methods to self.dynamic
     # -------------------------------------------------------------------
     @overrides
-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: "Instance",
         sample: TrainingSample,
     ) -> Tuple[Dict, Dict]:
-        return self.dynamic.sample_xy(instance, sample)
+        return self.dynamic.sample_xy_old(instance, sample)

     def sample_predict(
         self,
@@ -123,9 +123,9 @@ class UserCutsComponent(Component):
         self.dynamic.fit_xy(x, y)

     @overrides
-    def sample_evaluate(
+    def sample_evaluate_old(
         self,
         instance: "Instance",
         sample: TrainingSample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        return self.dynamic.sample_evaluate(instance, sample)
+        return self.dynamic.sample_evaluate_old(instance, sample)

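Both DynamicLazyConstraintsComponent and UserCutsComponent keep no ML logic of their own: the renamed `sample_xy_old` and `sample_evaluate_old` wrappers simply forward to the matching methods on `self.dynamic`, which from the hunks above appears to be the shared DynamicConstraintsComponent. A stripped-down sketch of that forwarding, with stub classes and simplified signatures:

from typing import Any, Dict, Tuple


class DynamicConstraintsComponent:
    # Stub: the real class trains classifiers per constraint category.
    def sample_xy_old(self, instance: Any, sample: Any) -> Tuple[Dict, Dict]:
        return {}, {}

    def sample_evaluate_old(self, instance: Any, sample: Any) -> Dict:
        return {}


class UserCutsComponent:
    def __init__(self) -> None:
        self.dynamic = DynamicConstraintsComponent()

    # The wrappers only forward; renaming them keeps the delegation symmetric.
    def sample_xy_old(self, instance: Any, sample: Any) -> Tuple[Dict, Dict]:
        return self.dynamic.sample_xy_old(instance, sample)

    def sample_evaluate_old(self, instance: Any, sample: Any) -> Dict:
        return self.dynamic.sample_evaluate_old(instance, sample)
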
@@ -36,7 +36,7 @@ class ObjectiveValueComponent(Component):
         self.regressor_prototype = regressor

     @overrides
-    def before_solve_mip(
+    def before_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -68,7 +68,7 @@ class ObjectiveValueComponent(Component):
         sample: TrainingSample,
     ) -> Dict[str, float]:
         pred: Dict[str, float] = {}
-        x, _ = self.sample_xy(instance, sample)
+        x, _ = self.sample_xy_old(instance, sample)
         for c in ["Upper bound", "Lower bound"]:
             if c in self.regressors is not None:
                 pred[c] = self.regressors[c].predict(np.array(x[c]))[0, 0]
@@ -77,7 +77,7 @@ class ObjectiveValueComponent(Component):
         return pred

     @overrides
-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: Instance,
         sample: TrainingSample,
@@ -99,7 +99,7 @@ class ObjectiveValueComponent(Component):
         return x, y

     @overrides
-    def sample_evaluate(
+    def sample_evaluate_old(
         self,
         instance: Instance,
         sample: TrainingSample,

@@ -60,7 +60,7 @@ class PrimalSolutionComponent(Component):
         self.classifier_prototype = classifier

     @overrides
-    def before_solve_mip(
+    def before_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: Instance,
@@ -110,7 +110,7 @@ class PrimalSolutionComponent(Component):
         assert instance.features.variables is not None

         # Compute y_pred
-        x, _ = self.sample_xy(instance, sample)
+        x, _ = self.sample_xy_old(instance, sample)
         y_pred = {}
         for category in x.keys():
             assert category in self.classifiers, (
@@ -144,7 +144,7 @@ class PrimalSolutionComponent(Component):
         return solution

     @overrides
-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: Instance,
         sample: TrainingSample,
@@ -180,7 +180,7 @@ class PrimalSolutionComponent(Component):
         return x, y

     @overrides
-    def sample_evaluate(
+    def sample_evaluate_old(
         self,
         instance: Instance,
         sample: TrainingSample,

@@ -51,7 +51,7 @@ class StaticLazyConstraintsComponent(Component):
         self.n_iterations: int = 0

     @overrides
-    def before_solve_mip(
+    def before_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: "Instance",
@@ -85,7 +85,7 @@ class StaticLazyConstraintsComponent(Component):
         self.n_iterations = 0

     @overrides
-    def after_solve_mip(
+    def after_solve_mip_old(
         self,
         solver: "LearningSolver",
         instance: "Instance",
@@ -151,7 +151,7 @@ class StaticLazyConstraintsComponent(Component):
     ) -> List[Hashable]:
         assert instance.features.constraints is not None

-        x, y = self.sample_xy(instance, sample)
+        x, y = self.sample_xy_old(instance, sample)
         category_to_cids: Dict[Hashable, List[Hashable]] = {}
         for (cid, cfeatures) in instance.features.constraints.items():
             if cfeatures.category is None:
@@ -174,7 +174,7 @@ class StaticLazyConstraintsComponent(Component):
         return enforced_cids

     @overrides
-    def sample_xy(
+    def sample_xy_old(
         self,
         instance: "Instance",
         sample: TrainingSample,

@@ -162,7 +162,7 @@ class LearningSolver:
         instance.features.__dict__ = features.__dict__
         sample.after_load = features

-        callback_args = (
+        callback_args_old = (
             self,
             instance,
             model,
@@ -177,7 +177,7 @@ class LearningSolver:
         if self.solve_lp:
             logger.debug("Running before_solve_lp callbacks...")
             for component in self.components.values():
-                component.before_solve_lp(*callback_args)
+                component.before_solve_lp_old(*callback_args_old)

             logger.info("Solving root LP relaxation...")
             lp_stats = self.internal_solver.solve_lp(tee=tee)
@@ -188,7 +188,7 @@ class LearningSolver:

             logger.debug("Running after_solve_lp callbacks...")
             for component in self.components.values():
-                component.after_solve_lp(*callback_args)
+                component.after_solve_lp_old(*callback_args_old)

             # Extract features (after-lp)
             # -------------------------------------------------------
@@ -232,7 +232,7 @@ class LearningSolver:
         # -------------------------------------------------------
         logger.debug("Running before_solve_mip callbacks...")
         for component in self.components.values():
-            component.before_solve_mip(*callback_args)
+            component.before_solve_mip_old(*callback_args_old)

         # Solve MIP
         # -------------------------------------------------------
@@ -269,7 +269,7 @@ class LearningSolver:
         # -------------------------------------------------------
         logger.debug("Calling after_solve_mip callbacks...")
         for component in self.components.values():
-            component.after_solve_mip(*callback_args)
+            component.after_solve_mip_old(*callback_args_old)

         # Flush
         # -------------------------------------------------------

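On the solver side, the shared argument tuple becomes `callback_args_old` and every dispatch loop now targets the `_old` callbacks, preserving the original ordering: before/after the root LP relaxation (when `solve_lp` is enabled), then before/after the MIP. A condensed, hypothetical sketch of that sequence; the real tuple carries more entries, and the solver performs feature extraction and statistics collection between the loops:

def dispatch_callbacks_old(solver, instance, model, tee=False):
    # Condensed illustration of the order in which LearningSolver invokes
    # the renamed callbacks; solver internals are replaced by comments.
    callback_args_old = (solver, instance, model)
    if solver.solve_lp:
        for component in solver.components.values():
            component.before_solve_lp_old(*callback_args_old)
        # ... solve the root LP relaxation ...
        for component in solver.components.values():
            component.after_solve_lp_old(*callback_args_old)
    for component in solver.components.values():
        component.before_solve_mip_old(*callback_args_old)
    # ... solve the MIP ...
    for component in solver.components.values():
        component.after_solve_mip_old(*callback_args_old)
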
@@ -10,7 +10,7 @@ from miplearn.instance.base import Instance


 def test_xy_instance() -> None:
-    def _sample_xy(features: Features, sample: str) -> Tuple[Dict, Dict]:
+    def _sample_xy_old(features: Features, sample: str) -> Tuple[Dict, Dict]:
         x = {
             "s1": {
                 "category_a": [
@@ -60,7 +60,7 @@ def test_xy_instance() -> None:
     instance_2 = Mock(spec=Instance)
     instance_2.training_data = ["s3"]
     instance_2.features = {}
-    comp.sample_xy = _sample_xy  # type: ignore
+    comp.sample_xy_old = _sample_xy_old  # type: ignore
     x_expected = {
         "category_a": [
             [1, 2, 3],
@@ -160,7 +160,7 @@ def test_sample_predict_evaluate(training_instances: List[Instance]) -> None:
         training_instances[0].training_data[0],
     )
     assert pred == ["c1", "c4"]
-    ev = comp.sample_evaluate(
+    ev = comp.sample_evaluate_old(
         training_instances[0],
         training_instances[0].training_data[0],
     )

@@ -33,7 +33,7 @@ def features() -> Features:


 @pytest.fixture
-def sample() -> TrainingSample:
+def sample_old() -> TrainingSample:
     return TrainingSample(
         lower_bound=1.0,
         upper_bound=2.0,
@@ -50,7 +50,7 @@ def sample_without_lp() -> TrainingSample:


 @pytest.fixture
-def sample_without_ub() -> TrainingSample:
+def sample_without_ub_old() -> TrainingSample:
     return TrainingSample(
         lower_bound=1.0,
         lp_value=3.0,
@@ -59,7 +59,7 @@ def sample_without_ub() -> TrainingSample:

 def test_sample_xy(
     instance: Instance,
-    sample: TrainingSample,
+    sample_old: TrainingSample,
 ) -> None:
     x_expected = {
         "Lower bound": [[1.0, 2.0, 3.0]],
@@ -69,7 +69,7 @@ def test_sample_xy(
         "Lower bound": [[1.0]],
         "Upper bound": [[2.0]],
     }
-    xy = ObjectiveValueComponent().sample_xy(instance, sample)
+    xy = ObjectiveValueComponent().sample_xy_old(instance, sample_old)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -88,7 +88,7 @@ def test_sample_xy_without_lp(
         "Lower bound": [[1.0]],
         "Upper bound": [[2.0]],
     }
-    xy = ObjectiveValueComponent().sample_xy(instance, sample_without_lp)
+    xy = ObjectiveValueComponent().sample_xy_old(instance, sample_without_lp)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -97,14 +97,14 @@ def test_sample_xy_without_lp(

 def test_sample_xy_without_ub(
     instance: Instance,
-    sample_without_ub: TrainingSample,
+    sample_without_ub_old: TrainingSample,
 ) -> None:
     x_expected = {
         "Lower bound": [[1.0, 2.0, 3.0]],
         "Upper bound": [[1.0, 2.0, 3.0]],
     }
     y_expected = {"Lower bound": [[1.0]]}
-    xy = ObjectiveValueComponent().sample_xy(instance, sample_without_ub)
+    xy = ObjectiveValueComponent().sample_xy_old(instance, sample_without_ub_old)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -179,9 +179,9 @@ def test_fit_xy_without_ub() -> None:

 def test_sample_predict(
     instance: Instance,
-    sample: TrainingSample,
+    sample_old: TrainingSample,
 ) -> None:
-    x, y = ObjectiveValueComponent().sample_xy(instance, sample)
+    x, y = ObjectiveValueComponent().sample_xy_old(instance, sample_old)
     comp = ObjectiveValueComponent()
     comp.regressors["Lower bound"] = Mock(spec=Regressor)
     comp.regressors["Upper bound"] = Mock(spec=Regressor)
@@ -191,7 +191,7 @@ def test_sample_predict(
     comp.regressors["Upper bound"].predict = Mock(  # type: ignore
         side_effect=lambda _: np.array([[60.0]])
     )
-    pred = comp.sample_predict(instance, sample)
+    pred = comp.sample_predict(instance, sample_old)
     assert pred == {
         "Lower bound": 50.0,
         "Upper bound": 60.0,
@@ -208,15 +208,15 @@ def test_sample_predict(

 def test_sample_predict_without_ub(
     instance: Instance,
-    sample_without_ub: TrainingSample,
+    sample_without_ub_old: TrainingSample,
 ) -> None:
-    x, y = ObjectiveValueComponent().sample_xy(instance, sample_without_ub)
+    x, y = ObjectiveValueComponent().sample_xy_old(instance, sample_without_ub_old)
     comp = ObjectiveValueComponent()
     comp.regressors["Lower bound"] = Mock(spec=Regressor)
     comp.regressors["Lower bound"].predict = Mock(  # type: ignore
         side_effect=lambda _: np.array([[50.0]])
     )
-    pred = comp.sample_predict(instance, sample_without_ub)
+    pred = comp.sample_predict(instance, sample_without_ub_old)
     assert pred == {
         "Lower bound": 50.0,
     }
@@ -226,13 +226,13 @@ def test_sample_predict_without_ub(
     )


-def test_sample_evaluate(instance: Instance, sample: TrainingSample) -> None:
+def test_sample_evaluate(instance: Instance, sample_old: TrainingSample) -> None:
     comp = ObjectiveValueComponent()
     comp.regressors["Lower bound"] = Mock(spec=Regressor)
     comp.regressors["Lower bound"].predict = lambda _: np.array([[1.05]])  # type: ignore
     comp.regressors["Upper bound"] = Mock(spec=Regressor)
     comp.regressors["Upper bound"].predict = lambda _: np.array([[2.50]])  # type: ignore
-    ev = comp.sample_evaluate(instance, sample)
+    ev = comp.sample_evaluate_old(instance, sample_old)
     assert ev == {
         "Lower bound": {
             "Actual value": 1.0,

@@ -68,7 +68,7 @@ def test_xy() -> None:
             [True, False],
         ]
     }
-    xy = PrimalSolutionComponent().sample_xy(instance, sample)
+    xy = PrimalSolutionComponent().sample_xy_old(instance, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -119,7 +119,7 @@ def test_xy_without_lp_solution() -> None:
             [True, False],
         ]
     }
-    xy = PrimalSolutionComponent().sample_xy(instance, sample)
+    xy = PrimalSolutionComponent().sample_xy_old(instance, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected
@@ -164,7 +164,7 @@ def test_predict() -> None:
             "x[2]": 0.9,
         }
     )
-    x, _ = PrimalSolutionComponent().sample_xy(instance, sample)
+    x, _ = PrimalSolutionComponent().sample_xy_old(instance, sample)
     comp = PrimalSolutionComponent()
     comp.classifiers = {"default": clf}
     comp.thresholds = {"default": thr}
@@ -253,7 +253,7 @@ def test_evaluate() -> None:
             "x[4]": 1.0,
         }
     )
-    ev = comp.sample_evaluate(instance, sample)
+    ev = comp.sample_evaluate_old(instance, sample)
     assert ev == {
         0: classifier_evaluation_dict(tp=1, fp=1, tn=3, fn=0),
         1: classifier_evaluation_dict(tp=2, fp=0, tn=1, fn=2),

@@ -116,7 +116,7 @@ def test_usage_with_solver(instance: Instance) -> None:
     stats: LearningSolveStats = {}

     # LearningSolver calls before_solve_mip
-    component.before_solve_mip(
+    component.before_solve_mip_old(
         solver=solver,
         instance=instance,
         model=None,
@@ -154,7 +154,7 @@ def test_usage_with_solver(instance: Instance) -> None:
     internal.add_constraint.assert_not_called()

     # LearningSolver calls after_solve_mip
-    component.after_solve_mip(
+    component.after_solve_mip_old(
         solver=solver,
         instance=instance,
         model=None,
@@ -250,7 +250,7 @@ def test_sample_xy(
         "type-a": [[False, True], [False, True], [True, False]],
         "type-b": [[False, True]],
     }
-    xy = StaticLazyConstraintsComponent().sample_xy(instance, sample)
+    xy = StaticLazyConstraintsComponent().sample_xy_old(instance, sample)
     assert xy is not None
     x_actual, y_actual = xy
     assert x_actual == x_expected