Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 09:28:51 -06:00)

Commit: Make all before/solve callbacks receive same parameters
@@ -28,18 +28,35 @@ class Component:
         solver: "LearningSolver",
         instance: Instance,
         model: Any,
+        stats: LearningSolveStats,
+        features: Features,
+        training_data: TrainingSample,
     ) -> None:
         """
         Method called by LearningSolver before the root LP relaxation is solved.

         Parameters
         ----------
-        solver
+        solver: LearningSolver
             The solver calling this method.
-        instance
+        instance: Instance
             The instance being solved.
         model
             The concrete optimization model being solved.
+        stats: LearningSolveStats
+            A dictionary containing statistics about the solution process, such as
+            number of nodes explored and running time. Components are free to add
+            their own statistics here. For example, PrimalSolutionComponent adds
+            statistics regarding the number of predicted variables. All statistics in
+            this dictionary are exported to the benchmark CSV file.
+        features: Features
+            Features describing the model.
+        training_data: TrainingSample
+            A dictionary containing data that may be useful for training machine
+            learning models and accelerating the solution process. Components are
+            free to add their own training data here. For example,
+            PrimalSolutionComponent adds the current primal solution. The data must
+            be picklable.
         """
         return
@@ -49,31 +66,12 @@ class Component:
         instance: Instance,
         model: Any,
         stats: LearningSolveStats,
+        features: Features,
         training_data: TrainingSample,
     ) -> None:
         """
         Method called by LearningSolver after the root LP relaxation is solved.
-
-        Parameters
-        ----------
-        solver: LearningSolver
-            The solver calling this method.
-        instance: Instance
-            The instance being solved.
-        model: Any
-            The concrete optimization model being solved.
-        stats: LearningSolveStats
-            A dictionary containing statistics about the solution process, such as
-            number of nodes explored and running time. Components are free to add
-            their own statistics here. For example, PrimalSolutionComponent adds
-            statistics regarding the number of predicted variables. All statistics in
-            this dictionary are exported to the benchmark CSV file.
-        training_data: TrainingSample
-            A dictionary containing data that may be useful for training machine
-            learning models and accelerating the solution process. Components are
-            free to add their own training data here. For example,
-            PrimalSolutionComponent adds the current primal solution. The data must
-            be picklable.
+        See before_solve_lp for a description of the parameters.
         """
         return
@@ -82,18 +80,13 @@ class Component:
         solver: "LearningSolver",
         instance: Instance,
         model: Any,
+        stats: LearningSolveStats,
+        features: Features,
+        training_data: TrainingSample,
     ) -> None:
         """
         Method called by LearningSolver before the MIP is solved.
-
-        Parameters
-        ----------
-        solver
-            The solver calling this method.
-        instance
-            The instance being solved.
-        model
-            The concrete optimization model being solved.
+        See before_solve_lp for a description of the parameters.
         """
         return
@@ -103,31 +96,12 @@ class Component:
         instance: Instance,
         model: Any,
         stats: LearningSolveStats,
+        features: Features,
         training_data: TrainingSample,
     ) -> None:
         """
         Method called by LearningSolver after the MIP is solved.
-
-        Parameters
-        ----------
-        solver: LearningSolver
-            The solver calling this method.
-        instance: Instance
-            The instance being solved.
-        model: Any
-            The concrete optimization model being solved.
-        stats: LearningSolveStats
-            A dictionary containing statistics about the solution process, such as
-            number of nodes explored and running time. Components are free to add
-            their own statistics here. For example, PrimalSolutionComponent adds
-            statistics regarding the number of predicted variables. All statistics in
-            this dictionary are exported to the benchmark CSV file.
-        training_data: TrainingSample
-            A dictionary containing data that may be useful for training machine
-            learning models and accelerating the solution process. Components are
-            free to add their own training data here. For example,
-            PrimalSolutionComponent adds the current primal solution. The data must
-            be picklable.
+        See before_solve_lp for a description of the parameters.
         """
         return
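With the four hunks above, before_solve_lp, after_solve_lp, before_solve_mip and after_solve_mip now share one signature. As a minimal sketch of a user-defined component written against this interface (the class name, statistic key, and import path below are illustrative assumptions, not part of this commit):

    from miplearn import Component  # assumed export; adjust to the actual module path

    class SolveCounter(Component):
        """Illustrative component: counts solves via the shared stats dictionary."""

        def before_solve_mip(self, solver, instance, model, stats, features, training_data):
            # Every callback now receives the same seven arguments, so overriding
            # just one of them stays signature-compatible with LearningSolver.
            stats["SolveCounter: Calls"] = stats.get("SolveCounter: Calls", 0) + 1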
@@ -33,7 +33,15 @@ class UserCutsComponent(Component):
         self.classifier_prototype: Classifier = classifier
         self.classifiers: Dict[Any, Classifier] = {}

-    def before_solve_mip(self, solver, instance, model):
+    def before_solve_mip(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        features,
+        training_data,
+    ):
         instance.found_violated_user_cuts = []
         logger.info("Predicting violated user cuts...")
         violations = self.predict(instance)
@@ -42,16 +50,6 @@ class UserCutsComponent(Component):
             cut = instance.build_user_cut(model, v)
             solver.internal_solver.add_constraint(cut)
-
-    def after_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        results,
-        training_data,
-    ):
-        pass

     def fit(self, training_instances):
         logger.debug("Fitting...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -33,7 +33,15 @@ class DynamicLazyConstraintsComponent(Component):
         self.classifier_prototype: Classifier = classifier
         self.classifiers: Dict[Any, Classifier] = {}

-    def before_solve_mip(self, solver, instance, model):
+    def before_solve_mip(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        features,
+        training_data,
+    ):
         instance.found_violated_lazy_constraints = []
         logger.info("Predicting violated lazy constraints...")
         violations = self.predict(instance)
@@ -54,16 +62,6 @@ class DynamicLazyConstraintsComponent(Component):
             solver.internal_solver.add_constraint(cut)
             return True
-
-    def after_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        training_data,
-    ):
-        pass

     def fit(self, training_instances):
         logger.debug("Fitting...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -43,7 +43,15 @@ class StaticLazyConstraintsComponent(Component):
         self.use_two_phase_gap = use_two_phase_gap
         self.violation_tolerance = violation_tolerance

-    def before_solve_mip(self, solver, instance, model):
+    def before_solve_mip(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        features,
+        training_data,
+    ):
         self.pool = []
         if not solver.use_lazy_cb and self.use_two_phase_gap:
             logger.info("Increasing gap tolerance to %f", self.large_gap)
@@ -55,16 +63,6 @@ class StaticLazyConstraintsComponent(Component):
         if instance.has_static_lazy_constraints():
             self._extract_and_predict_static(solver, instance)
-
-    def after_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        training_data,
-    ):
-        pass

     def iteration_cb(self, solver, instance, model):
         if solver.use_lazy_cb:
             return False
@@ -52,32 +52,20 @@ class ObjectiveValueComponent(Component):
         solver: "LearningSolver",
         instance: Instance,
         model: Any,
+        stats: LearningSolveStats,
+        features: Features,
+        training_data: TrainingSample,
     ) -> None:
         if self.ub_regressor is not None:
             logger.info("Predicting optimal value...")
             pred = self.predict([instance])
-            self._predicted_lb = pred["Upper bound"][0]
-            self._predicted_ub = pred["Lower bound"][0]
-            logger.info(
-                "Predicted values: lb=%.2f, ub=%.2f"
-                % (
-                    self._predicted_lb,
-                    self._predicted_ub,
-                )
-            )
-
-    def after_solve_mip(
-        self,
-        solver: "LearningSolver",
-        instance: Instance,
-        model: Any,
-        stats: LearningSolveStats,
-        training_data: TrainingSample,
-    ) -> None:
-        if self._predicted_ub is not None:
-            stats["Objective: predicted UB"] = self._predicted_ub
-        if self._predicted_lb is not None:
-            stats["Objective: predicted LB"] = self._predicted_lb
+            predicted_lb = pred["Upper bound"][0]
+            predicted_ub = pred["Lower bound"][0]
+            logger.info("Predicted LB=%.2f, UB=%.2f" % (predicted_lb, predicted_ub))
+            if predicted_ub is not None:
+                stats["Objective: Predicted UB"] = predicted_ub
+            if predicted_lb is not None:
+                stats["Objective: Predicted LB"] = predicted_lb

     def fit(self, training_instances: Union[List[str], List[Instance]]) -> None:
         self.lb_regressor = self.lb_regressor_prototype.clone()
@@ -62,9 +62,16 @@ class PrimalSolutionComponent(Component):
         self.thresholds: Dict[Hashable, Threshold] = {}
         self.threshold_prototype = threshold
         self.classifier_prototype = classifier
-        self.stats: Dict[str, float] = {}

-    def before_solve_mip(self, solver, instance, model):
+    def before_solve_mip(
+        self,
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+        stats: LearningSolveStats,
+        features: Features,
+        training_data: TrainingSample,
+    ) -> None:
         if len(self.thresholds) > 0:
             logger.info("Predicting MIP solution...")
             solution = self.predict(
@@ -72,41 +79,32 @@ class PrimalSolutionComponent(Component):
                 instance.training_data[-1],
             )

-            # Collect prediction statistics
-            self.stats["Primal: Free"] = 0
-            self.stats["Primal: Zero"] = 0
-            self.stats["Primal: One"] = 0
+            # Update statistics
+            stats["Primal: Free"] = 0
+            stats["Primal: Zero"] = 0
+            stats["Primal: One"] = 0
             for (var, var_dict) in solution.items():
                 for (idx, value) in var_dict.items():
                     if value is None:
-                        self.stats["Primal: Free"] += 1
+                        stats["Primal: Free"] += 1
                     else:
                         if value < 0.5:
-                            self.stats["Primal: Zero"] += 1
+                            stats["Primal: Zero"] += 1
                         else:
-                            self.stats["Primal: One"] += 1
+                            stats["Primal: One"] += 1
             logger.info(
-                f"Predicted: free: {self.stats['Primal: Free']}, "
-                f"zero: {self.stats['Primal: zero']}, "
-                f"one: {self.stats['Primal: One']}"
+                f"Predicted: free: {stats['Primal: Free']}, "
+                f"zero: {stats['Primal: Zero']}, "
+                f"one: {stats['Primal: One']}"
             )

             # Provide solution to the solver
+            assert solver.internal_solver is not None
             if self.mode == "heuristic":
                 solver.internal_solver.fix(solution)
             else:
                 solver.internal_solver.set_warm_start(solution)
-
-    def after_solve_mip(
-        self,
-        solver: "LearningSolver",
-        instance: Instance,
-        model: Any,
-        stats: LearningSolveStats,
-        training_data: TrainingSample,
-    ) -> None:
-        stats.update(self.stats)

     def fit_xy(
         self,
         x: Dict[str, np.ndarray],
@@ -45,8 +45,23 @@ class ConvertTightIneqsIntoEqsStep(Component):
         self.check_optimality = check_optimality
         self.converted = []
         self.original_sense = {}
+        self.n_restored = 0
+        self.n_infeasible_iterations = 0
+        self.n_suboptimal_iterations = 0
+
+    def before_solve_mip(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        features,
+        training_data,
+    ):
+        self.n_restored = 0
+        self.n_infeasible_iterations = 0
+        self.n_suboptimal_iterations = 0

-    def before_solve_mip(self, solver, instance, _):
         logger.info("Predicting tight LP constraints...")
         x, constraints = DropRedundantInequalitiesStep.x(
             instance,
@@ -54,11 +69,8 @@ class ConvertTightIneqsIntoEqsStep(Component):
         )
         y = self.predict(x)

-        self.n_converted = 0
-        self.n_restored = 0
-        self.n_kept = 0
-        self.n_infeasible_iterations = 0
-        self.n_suboptimal_iterations = 0
+        n_converted = 0
+        n_kept = 0
         for category in y.keys():
             for i in range(len(y[category])):
                 if y[category][i][0] == 1:
@@ -67,11 +79,13 @@ class ConvertTightIneqsIntoEqsStep(Component):
                     self.original_sense[cid] = s
                     solver.internal_solver.set_constraint_sense(cid, "=")
                     self.converted += [cid]
-                    self.n_converted += 1
+                    n_converted += 1
                 else:
-                    self.n_kept += 1
+                    n_kept += 1
+        stats["ConvertTight: Kept"] = n_kept
+        stats["ConvertTight: Converted"] = n_converted

-        logger.info(f"Converted {self.n_converted} inequalities")
+        logger.info(f"Converted {n_converted} inequalities")

     def after_solve_mip(
         self,
@@ -79,12 +93,11 @@ class ConvertTightIneqsIntoEqsStep(Component):
         instance,
         model,
         stats,
+        features,
         training_data,
     ):
         if "slacks" not in training_data.keys():
             training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
-        stats["ConvertTight: Kept"] = self.n_kept
-        stats["ConvertTight: Converted"] = self.n_converted
         stats["ConvertTight: Restored"] = self.n_restored
         stats["ConvertTight: Inf iterations"] = self.n_infeasible_iterations
         stats["ConvertTight: Subopt iterations"] = self.n_suboptimal_iterations
@@ -46,12 +46,20 @@ class DropRedundantInequalitiesStep(Component):
         self.violation_tolerance = violation_tolerance
         self.max_iterations = max_iterations
         self.current_iteration = 0
-        self.total_dropped = 0
-        self.total_restored = 0
-        self.total_kept = 0
-        self.total_iterations = 0
+        self.n_iterations = 0
+        self.n_restored = 0

-    def before_solve_mip(self, solver, instance, _):
+    def before_solve_mip(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        features,
+        training_data,
+    ):
+        self.n_iterations = 0
+        self.n_restored = 0
         self.current_iteration = 0

         logger.info("Predicting redundant LP constraints...")
@@ -62,10 +70,8 @@ class DropRedundantInequalitiesStep(Component):
         y = self.predict(x)

         self.pool = []
-        self.total_dropped = 0
-        self.total_restored = 0
-        self.total_kept = 0
-        self.total_iterations = 0
+        n_dropped = 0
+        n_kept = 0
         for category in y.keys():
             for i in range(len(y[category])):
                 if y[category][i][1] == 1:
@@ -75,10 +81,12 @@ class DropRedundantInequalitiesStep(Component):
                         obj=solver.internal_solver.extract_constraint(cid),
                     )
                     self.pool += [c]
-                    self.total_dropped += 1
+                    n_dropped += 1
                 else:
-                    self.total_kept += 1
-        logger.info(f"Extracted {self.total_dropped} predicted constraints")
+                    n_kept += 1
+        stats["DropRedundant: Kept"] = n_kept
+        stats["DropRedundant: Dropped"] = n_dropped
+        logger.info(f"Extracted {n_dropped} predicted constraints")

     def after_solve_mip(
         self,
@@ -86,18 +94,13 @@ class DropRedundantInequalitiesStep(Component):
         instance,
         model,
         stats,
+        features,
         training_data,
     ):
         if "slacks" not in training_data.keys():
             training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
-        stats.update(
-            {
-                "DropRedundant: Kept": self.total_kept,
-                "DropRedundant: Dropped": self.total_dropped,
-                "DropRedundant: Restored": self.total_restored,
-                "DropRedundant: Iterations": self.total_iterations,
-            }
-        )
+        stats["DropRedundant: Iterations"] = self.n_iterations
+        stats["DropRedundant: Restored"] = self.n_restored

     def fit(self, training_instances, n_jobs=1):
         x, y = self.x_y(training_instances, n_jobs=n_jobs)
@@ -234,12 +237,12 @@ class DropRedundantInequalitiesStep(Component):
                 self.pool.remove(c)
                 solver.internal_solver.add_constraint(c.obj)
         if len(constraints_to_add) > 0:
-            self.total_restored += len(constraints_to_add)
+            self.n_restored += len(constraints_to_add)
             logger.info(
                 "%8d constraints %8d in the pool"
                 % (len(constraints_to_add), len(self.pool))
             )
-            self.total_iterations += 1
+            self.n_iterations += 1
             return True
         else:
             return False
@@ -14,16 +14,14 @@ class RelaxIntegralityStep(Component):
     Component that relaxes all integrality constraints before the problem is solved.
     """

-    def before_solve_mip(self, solver, instance, _):
-        logger.info("Relaxing integrality...")
-        solver.internal_solver.relax()
-
-    def after_solve_mip(
+    def before_solve_mip(
         self,
         solver,
         instance,
         model,
         stats,
+        features,
         training_data,
     ):
-        return
+        logger.info("Relaxing integrality...")
+        solver.internal_solver.relax()
@@ -178,11 +178,20 @@ class LearningSolver:
         extractor = FeaturesExtractor(self.internal_solver)
         instance.features = extractor.extract(instance)

+        callback_args = (
+            self,
+            instance,
+            model,
+            stats,
+            instance.features,
+            training_sample,
+        )
+
         # Solve root LP relaxation
         if self.solve_lp:
             logger.debug("Running before_solve_lp callbacks...")
             for component in self.components.values():
-                component.before_solve_lp(self, instance, model)
+                component.before_solve_lp(*callback_args)

             logger.info("Solving root LP relaxation...")
             lp_stats = self.internal_solver.solve_lp(tee=tee)
@@ -193,7 +202,7 @@ class LearningSolver:

             logger.debug("Running after_solve_lp callbacks...")
             for component in self.components.values():
-                component.after_solve_lp(self, instance, model, stats, training_sample)
+                component.after_solve_lp(*callback_args)
         else:
             training_sample["LP solution"] = self.internal_solver.get_empty_solution()
             training_sample["LP value"] = 0.0
@@ -222,7 +231,7 @@ class LearningSolver:
         # Before-solve callbacks
         logger.debug("Running before_solve_mip callbacks...")
         for component in self.components.values():
-            component.before_solve_mip(self, instance, model)
+            component.before_solve_mip(*callback_args)

         # Solve MIP
         logger.info("Solving MIP...")
@@ -250,7 +259,7 @@ class LearningSolver:
         # After-solve callbacks
         logger.debug("Calling after_solve_mip callbacks...")
         for component in self.components.values():
-            component.after_solve_mip(self, instance, model, stats, training_sample)
+            component.after_solve_mip(*callback_args)

         # Write to file, if necessary
         if not discard_output and filename is not None:
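Since callback_args is unpacked positionally at every call site, the tuple's order must match the parameter order declared by every component; its first element (the LearningSolver itself, `self` at the call site) binds to each callback's `solver` parameter. A sketch of what `component.before_solve_mip(*callback_args)` expands to, using the names from the hunk above:

    component.before_solve_mip(
        self,               # -> solver: the LearningSolver making the call
        instance,           # -> instance being solved
        model,              # -> concrete optimization model
        stats,              # -> shared LearningSolveStats dictionary
        instance.features,  # -> Features extracted before the callbacks run
        training_sample,    # -> TrainingSample collected for this solve
    )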
@@ -59,11 +59,11 @@ LearningSolveStats = TypedDict(
         "MIP log": str,
         "Mode": str,
         "Nodes": Optional[int],
-        "Objective: predicted LB": float,
-        "Objective: predicted UB": float,
-        "Primal: free": int,
-        "Primal: one": int,
-        "Primal: zero": int,
+        "Objective: Predicted LB": float,
+        "Objective: Predicted UB": float,
+        "Primal: Free": int,
+        "Primal: One": int,
+        "Primal: Zero": int,
         "Sense": str,
         "Solver": str,
         "Upper bound": Optional[float],
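These are key renames, not additions, so any downstream code reading the stats dictionary (benchmark scripts, notebooks) must switch to the capitalized spellings. An illustrative read, assuming a solver and instance set up as in the tests below:

    stats = solver.solve(instance)
    print(stats["Objective: Predicted LB"], stats["Objective: Predicted UB"])
    print(stats["Primal: Free"], stats["Primal: Zero"], stats["Primal: One"])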
@@ -80,7 +80,14 @@ def test_drop_redundant():
    component.classifiers = classifiers

    # LearningSolver calls before_solve
-    component.before_solve_mip(solver, instance, None)
+    component.before_solve_mip(
+        solver=solver,
+        instance=instance,
+        model=None,
+        stats={},
+        features=None,
+        training_data=None,
+    )

    # Should query list of constraints
    internal.get_constraint_ids.assert_called_once()
@@ -123,7 +130,14 @@ def test_drop_redundant():

    # LearningSolver calls after_solve
    training_data = {}
-    component.after_solve_mip(solver, instance, None, {}, training_data)
+    component.after_solve_mip(
+        solver=solver,
+        instance=instance,
+        model=None,
+        stats={},
+        features=None,
+        training_data=training_data,
+    )

    # Should query slack for all inequalities
    internal.get_inequality_slacks.assert_called_once()
@@ -147,7 +161,14 @@ def test_drop_redundant_with_check_feasibility():
    component.classifiers = classifiers

    # LearningSolver calls before_solve
-    component.before_solve_mip(solver, instance, None)
+    component.before_solve_mip(
+        solver=solver,
+        instance=instance,
+        model=None,
+        stats={},
+        features=None,
+        training_data=None,
+    )

    # Assert constraints are extracted
    assert internal.extract_constraint.call_count == 2
@@ -86,7 +86,14 @@ def test_lazy_before():
    component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])

-    component.before_solve_mip(solver, instances[0], models[0])
+    component.before_solve_mip(
+        solver=solver,
+        instance=instances[0],
+        model=models[0],
+        stats=None,
+        features=None,
+        training_data=None,
+    )

    # Should ask classifier likelihood of each constraint being violated
    expected_x_test_a = np.array([[67.0, 21.75, 1287.92]])
@@ -69,7 +69,14 @@ def test_usage_with_solver():
    )

    # LearningSolver calls before_solve
-    component.before_solve_mip(solver, instance, None)
+    component.before_solve_mip(
+        solver=solver,
+        instance=instance,
+        model=None,
+        stats=None,
+        features=None,
+        training_data=None,
+    )

    # Should ask if instance has static lazy constraints
    instance.has_static_lazy_constraints.assert_called_once()
@@ -157,11 +157,11 @@ def test_xy_sample_without_lp() -> None:
    assert y_actual == y_expected


-def test_usage():
+def test_usage() -> None:
    solver = LearningSolver(components=[ObjectiveValueComponent()])
    instance = get_knapsack_instance(GurobiPyomoSolver())
    solver.solve(instance)
    solver.fit([instance])
    stats = solver.solve(instance)
-    assert stats["Lower bound"] == stats["Objective: predicted LB"]
-    assert stats["Upper bound"] == stats["Objective: predicted UB"]
+    assert stats["Lower bound"] == stats["Objective: Predicted LB"]
+    assert stats["Upper bound"] == stats["Objective: Predicted UB"]
@@ -226,6 +226,6 @@ def test_usage():
    solver.solve(instance)
    solver.fit([instance])
    stats = solver.solve(instance)
-    assert stats["Primal: free"] == 0
-    assert stats["Primal: one"] + stats["Primal: zero"] == 10
+    assert stats["Primal: Free"] == 0
+    assert stats["Primal: One"] + stats["Primal: Zero"] == 10
    assert stats["Lower bound"] == stats["Warm start value"]
@@ -133,7 +133,7 @@ def test_simulate_perfect():
        simulate_perfect=True,
    )
    stats = solver.solve(tmp.name)
-    assert stats["Lower bound"] == stats["Objective: predicted LB"]
+    assert stats["Lower bound"] == stats["Objective: Predicted LB"]


def test_gap():