diff --git a/miplearn/components/component.py b/miplearn/components/component.py
index 6ad0b2e..0c95f2e 100644
--- a/miplearn/components/component.py
+++ b/miplearn/components/component.py
@@ -21,6 +21,60 @@ class Component(ABC):
     strategy.
     """
 
+    def before_solve_lp(
+        self,
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+    ) -> None:
+        """
+        Method called by LearningSolver before the root LP relaxation is solved.
+
+        Parameters
+        ----------
+        solver: LearningSolver
+            The solver calling this method.
+        instance: Instance
+            The instance being solved.
+        model: Any
+            The concrete optimization model being solved.
+        """
+        return
+
+    def after_solve_lp(
+        self,
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+        stats: LearningSolveStats,
+        training_data: TrainingSample,
+    ) -> None:
+        """
+        Method called by LearningSolver after the root LP relaxation is solved.
+
+        Parameters
+        ----------
+        solver: LearningSolver
+            The solver calling this method.
+        instance: Instance
+            The instance being solved.
+        model: Any
+            The concrete optimization model being solved.
+        stats: LearningSolveStats
+            A dictionary containing statistics about the solution process, such as
+            number of nodes explored and running time. Components are free to add
+            their own statistics here. For example, PrimalSolutionComponent adds
+            statistics regarding the number of predicted variables. All statistics in
+            this dictionary are exported to the benchmark CSV file.
+        training_data: TrainingSample
+            A dictionary containing data that may be useful for training machine
+            learning models and accelerating the solution process. Components are
+            free to add their own training data here. For example,
+            PrimalSolutionComponent adds the current primal solution. The data must
+            be picklable.
+        """
+        return
+
     def before_solve_mip(
         self,
         solver: "LearningSolver",
@@ -41,7 +95,6 @@
         """
         return
 
-    @abstractmethod
     def after_solve_mip(
         self,
         solver: "LearningSolver",
@@ -74,7 +127,7 @@
             PrimalSolutionComponent adds the current primal solution. The data must
             be pickable.
         """
-        pass
+        return
 
     def fit(
         self,
diff --git a/miplearn/solvers/gurobi.py b/miplearn/solvers/gurobi.py
index 1aeff1a..d333d2d 100644
--- a/miplearn/solvers/gurobi.py
+++ b/miplearn/solvers/gurobi.py
@@ -144,8 +144,8 @@ class GurobiSolver(InternalSolver):
         if not self.is_infeasible():
             opt_value = self.model.objVal
         return {
-            "Optimal value": opt_value,
-            "Log": log,
+            "LP value": opt_value,
+            "LP log": log,
         }
 
     def solve(
@@ -205,9 +205,8 @@ class GurobiSolver(InternalSolver):
             "Wallclock time": total_wallclock_time,
             "Nodes": total_nodes,
             "Sense": sense,
-            "Log": log,
+            "MIP log": log,
             "Warm start value": ws_value,
-            "LP value": None,
         }
         return stats
diff --git a/miplearn/solvers/learning.py b/miplearn/solvers/learning.py
index 714e1d1..882b03d 100644
--- a/miplearn/solvers/learning.py
+++ b/miplearn/solvers/learning.py
@@ -22,7 +22,7 @@ from miplearn.instance import Instance
 from miplearn.solvers import _RedirectOutput
 from miplearn.solvers.internal import InternalSolver
 from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
-from miplearn.types import TrainingSample, LearningSolveStats
+from miplearn.types import TrainingSample, LearningSolveStats, MIPSolveStats
 
 logger = logging.getLogger(__name__)
 
@@ -85,8 +85,8 @@ class LearningSolver:
     use_lazy_cb: bool
         If true, use native solver callbacks for enforcing lazy constraints,
         instead of a simple loop. May not be supported by all solvers.
-    solve_lp_first: bool
-        If true, solve LP relaxation first, then solve original MIP. This
+    solve_lp: bool
+        If true, solve the root LP relaxation before solving the MIP. This
         option should be activated if the LP relaxation is not very
         expensive to solve and if it provides good hints for the integer
         solution.
@@ -103,7 +103,7 @@ class LearningSolver:
         mode: str = "exact",
         solver: Callable[[], InternalSolver] = None,
         use_lazy_cb: bool = False,
-        solve_lp_first: bool = True,
+        solve_lp: bool = True,
         simulate_perfect: bool = False,
     ):
         if solver is None:
@@ -113,7 +113,7 @@ class LearningSolver:
         self.internal_solver: Optional[InternalSolver] = None
         self.mode: str = mode
         self.simulate_perfect: bool = simulate_perfect
-        self.solve_lp_first: bool = solve_lp_first
+        self.solve_lp: bool = solve_lp
         self.solver_factory: Callable[[], InternalSolver] = solver
         self.tee = False
         self.use_lazy_cb: bool = use_lazy_cb
@@ -164,6 +164,9 @@
             instance.training_data = []
         instance.training_data += [training_sample]
 
+        # Initialize stats
+        stats: LearningSolveStats = {}
+
         # Initialize internal solver
         self.tee = tee
         self.internal_solver = self.solver_factory()
@@ -175,22 +178,26 @@
         extractor = ModelFeaturesExtractor(self.internal_solver)
         instance.model_features = extractor.extract()
 
-        # Solve linear relaxation
-        if self.solve_lp_first:
-            logger.info("Solving LP relaxation...")
+        # Solve root LP relaxation
+        if self.solve_lp:
+            logger.debug("Running before_solve_lp callbacks...")
+            for component in self.components.values():
+                component.before_solve_lp(self, instance, model)
+
+            logger.info("Solving root LP relaxation...")
             lp_stats = self.internal_solver.solve_lp(tee=tee)
+            stats.update(cast(LearningSolveStats, lp_stats))
             training_sample["LP solution"] = self.internal_solver.get_solution()
-            training_sample["LP value"] = lp_stats["Optimal value"]
-            training_sample["LP log"] = lp_stats["Log"]
+            training_sample["LP value"] = lp_stats["LP value"]
+            training_sample["LP log"] = lp_stats["LP log"]
+
+            logger.debug("Running after_solve_lp callbacks...")
+            for component in self.components.values():
+                component.after_solve_lp(self, instance, model, stats, training_sample)
         else:
             training_sample["LP solution"] = self.internal_solver.get_empty_solution()
             training_sample["LP value"] = 0.0
 
-        # Before-solve callbacks
-        logger.debug("Running before_solve_mip callbacks...")
-        for component in self.components.values():
-            component.before_solve_mip(self, instance, model)
-
         # Define wrappers
         def iteration_cb_wrapper() -> bool:
             should_repeat = False
@@ -212,16 +219,19 @@
         if self.use_lazy_cb:
             lazy_cb = lazy_cb_wrapper
 
+        # Before-solve callbacks
+        logger.debug("Running before_solve_mip callbacks...")
+        for component in self.components.values():
+            component.before_solve_mip(self, instance, model)
+
         # Solve MIP
         logger.info("Solving MIP...")
-        stats = cast(
-            LearningSolveStats,
-            self.internal_solver.solve(
-                tee=tee,
-                iteration_cb=iteration_cb_wrapper,
-                lazy_cb=lazy_cb,
-            ),
+        mip_stats = self.internal_solver.solve(
+            tee=tee,
+            iteration_cb=iteration_cb_wrapper,
+            lazy_cb=lazy_cb,
         )
+        stats.update(cast(LearningSolveStats, mip_stats))
         if "LP value" in training_sample.keys():
             stats["LP value"] = training_sample["LP value"]
         stats["Solver"] = "default"
@@ -234,7 +244,7 @@
         # Add some information to training_sample
         training_sample["Lower bound"] = stats["Lower bound"]
         training_sample["Upper bound"] = stats["Upper bound"]
-        training_sample["MIP log"] = stats["Log"]
+        training_sample["MIP log"] = stats["MIP log"]
         training_sample["Solution"] = self.internal_solver.get_solution()
 
         # After-solve callbacks
diff --git a/miplearn/solvers/pyomo/base.py b/miplearn/solvers/pyomo/base.py
index e5d1b33..2791a84 100644
--- a/miplearn/solvers/pyomo/base.py
+++ b/miplearn/solvers/pyomo/base.py
@@ -67,8 +67,8 @@ class BasePyomoSolver(InternalSolver):
         if not self.is_infeasible():
             opt_value = results["Problem"][0]["Lower bound"]
         return {
-            "Optimal value": opt_value,
-            "Log": streams[0].getvalue(),
+            "LP value": opt_value,
+            "LP log": streams[0].getvalue(),
         }
 
     def _restore_integrality(self) -> None:
@@ -114,10 +114,9 @@
             "Upper bound": ub,
             "Wallclock time": total_wallclock_time,
             "Sense": self._obj_sense,
-            "Log": log,
+            "MIP log": log,
             "Nodes": node_count,
             "Warm start value": ws_value,
-            "LP value": None,
         }
         return stats
diff --git a/miplearn/types.py b/miplearn/types.py
index 35ab0af..6a5871d 100644
--- a/miplearn/types.py
+++ b/miplearn/types.py
@@ -28,8 +28,8 @@ TrainingSample = TypedDict(
 LPSolveStats = TypedDict(
     "LPSolveStats",
     {
-        "Optimal value": Optional[float],
-        "Log": str,
+        "LP log": str,
+        "LP value": Optional[float],
     },
 )
 
@@ -37,13 +37,12 @@ MIPSolveStats = TypedDict(
     "MIPSolveStats",
     {
         "Lower bound": Optional[float],
-        "Upper bound": Optional[float],
-        "Wallclock time": float,
+        "MIP log": str,
         "Nodes": Optional[int],
         "Sense": str,
-        "Log": str,
+        "Upper bound": Optional[float],
+        "Wallclock time": float,
         "Warm start value": Optional[float],
-        "LP value": Optional[float],
     },
 )
 
@@ -52,21 +51,22 @@ LearningSolveStats = TypedDict(
     {
         "Gap": Optional[float],
         "Instance": Union[str, int],
+        "LP log": str,
         "LP value": Optional[float],
-        "Log": str,
         "Lower bound": Optional[float],
+        "MIP log": str,
         "Mode": str,
         "Nodes": Optional[int],
+        "Objective: predicted LB": float,
+        "Objective: predicted UB": float,
+        "Primal: free": int,
+        "Primal: one": int,
+        "Primal: zero": int,
         "Sense": str,
         "Solver": str,
         "Upper bound": Optional[float],
         "Wallclock time": float,
         "Warm start value": Optional[float],
-        "Primal: free": int,
-        "Primal: zero": int,
-        "Primal: one": int,
-        "Objective: predicted LB": float,
-        "Objective: predicted UB": float,
     },
     total=False,
 )
diff --git a/tests/components/steps/test_convert_tight.py b/tests/components/steps/test_convert_tight.py
index 44557a5..ead5dff 100644
--- a/tests/components/steps/test_convert_tight.py
+++ b/tests/components/steps/test_convert_tight.py
@@ -68,7 +68,7 @@ def test_convert_tight_infeasibility():
     solver = LearningSolver(
         solver=GurobiSolver,
         components=[comp],
-        solve_lp_first=False,
+        solve_lp=False,
     )
     instance = SampleInstance()
     stats = solver.solve(instance)
@@ -91,7 +91,7 @@ def test_convert_tight_suboptimality():
     solver = LearningSolver(
         solver=GurobiSolver,
         components=[comp],
-        solve_lp_first=False,
+        solve_lp=False,
     )
     instance = SampleInstance()
     stats = solver.solve(instance)
@@ -114,7 +114,7 @@ def test_convert_tight_optimal():
     solver = LearningSolver(
         solver=GurobiSolver,
         components=[comp],
-        solve_lp_first=False,
+        solve_lp=False,
     )
     instance = SampleInstance()
     stats = solver.solve(instance)
diff --git a/tests/solvers/test_internal_solver.py b/tests/solvers/test_internal_solver.py
index 7171d53..9229441 100644
--- a/tests/solvers/test_internal_solver.py
+++ b/tests/solvers/test_internal_solver.py
@@ -93,8 +93,8 @@ def test_internal_solver():
 
     stats = solver.solve_lp()
     assert not solver.is_infeasible()
-    assert round(stats["Optimal value"], 3) == 1287.923
-    assert len(stats["Log"]) > 100
+    assert round(stats["LP value"], 3) == 1287.923
+    assert len(stats["LP log"]) > 100
 
     solution = solver.get_solution()
     assert round(solution["x"][0], 3) == 1.000
@@ -104,7 +104,7 @@
 
     stats = solver.solve(tee=True)
     assert not solver.is_infeasible()
-    assert len(stats["Log"]) > 100
+    assert len(stats["MIP log"]) > 100
     assert stats["Lower bound"] == 1183.0
     assert stats["Upper bound"] == 1183.0
     assert stats["Sense"] == "max"
@@ -198,7 +198,7 @@ def test_infeasible_instance():
 
     stats = solver.solve_lp()
     assert solver.get_solution() is None
-    assert stats["Optimal value"] is None
+    assert stats["LP value"] is None
 
     assert solver.get_value("x", 0) is None
diff --git a/tests/solvers/test_learning_solver.py b/tests/solvers/test_learning_solver.py
index 4c2f932..168f356 100644
--- a/tests/solvers/test_learning_solver.py
+++ b/tests/solvers/test_learning_solver.py
@@ -57,7 +57,7 @@ def test_solve_without_lp():
     instance = _get_knapsack_instance(internal_solver)
     solver = LearningSolver(
         solver=internal_solver,
-        solve_lp_first=False,
+        solve_lp=False,
     )
     solver.solve(instance)
     solver.fit([instance])
diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py
index 7be39ec..58083d4 100644
--- a/tests/test_benchmark.py
+++ b/tests/test_benchmark.py
@@ -29,7 +29,7 @@ def test_benchmark():
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.fit(train_instances)
     benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.results.values.shape == (12, 17)
+    assert benchmark.results.values.shape == (12, 18)
     benchmark.write_csv("/tmp/benchmark.csv")
     assert os.path.isfile("/tmp/benchmark.csv")