Mirror of https://github.com/ANL-CEEESA/MIPLearn.git
Synced 2025-12-06 09:28:51 -06:00

Merge branch 'feature/convert-ineqs' into dev
.gitignore (vendored): 6 lines changed

@@ -39,8 +39,8 @@ TODO.md
 /site
 ENV/
 MANIFEST
-__pycache__/
-__pypackages__/
+**/__pycache__/
+**/__pypackages__/
 build/
 celerybeat-schedule
 celerybeat.pid
@@ -75,3 +75,5 @@ venv.bak/
 venv/
 wheels/
 notebooks/
+.vscode
+tmp
README.md: 10 lines changed

@@ -14,9 +14,12 @@
 </a>
 </p>
 
-**MIPLearn** is an extensible framework for **Learning-Enhanced Mixed-Integer Optimization**, an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data.
+**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Linear Programming (MIP) and Machine Learning (ML). The framework uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers (such as CPLEX, Gurobi or XPRESS).
 
-The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see [benchmarks](https://anl-ceeesa.github.io/MIPLearn/0.1/problems/) and [references](https://anl-ceeesa.github.io/MIPLearn/0.1/about/)).
+Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions to discrete optimization problems, but it can also prove the optimality and feasibility of these solutions.
+Unlike conventional MIP solvers, MIPLearn can take full advantage of very specific observations that happen to be true in a particular family of instances (such as the observation that a particular constraint is typically redundant, or that a particular variable typically assumes a certain value).
+
+For certain classes of problems, this approach has been shown to provide significant performance benefits (see [benchmarks](https://anl-ceeesa.github.io/MIPLearn/0.1/problems/) and [references](https://anl-ceeesa.github.io/MIPLearn/0.1/about/)).
 
 Features
 --------
@@ -35,7 +38,8 @@ For installation instructions, basic usage and benchmarks results, see the [offi
 
 Acknowledgments
 ---------------
-* Based upon work supported by **Laboratory Directed Research and Development** (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357, and the **U.S. Department of Energy Advanced Grid Modeling Program** under Grant DE-OE0000875.
+* Based upon work supported by **Laboratory Directed Research and Development** (LDRD) funding from Argonne National Laboratory, provided by the Director, Office of Science, of the U.S. Department of Energy under Contract No. DE-AC02-06CH11357.
+* Based upon work supported by the **U.S. Department of Energy Advanced Grid Modeling Program** under Grant DE-OE0000875.
 
 Citing MIPLearn
 ---------------
@@ -1,8 +1,11 @@
 # MIPLearn
 
-**MIPLearn** is an extensible framework for **Learning-Enhanced Mixed-Integer Optimization**, an approach targeted at discrete optimization problems that need to be repeatedly solved with only minor changes to input data.
+**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Linear Programming (MIP) and Machine Learning (ML). The framework uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers (such as CPLEX, Gurobi or XPRESS).
 
-The package uses Machine Learning (ML) to automatically identify patterns in previously solved instances of the problem, or in the solution process itself, and produces hints that can guide a conventional MIP solver towards the optimal solution faster. For particular classes of problems, this approach has been shown to provide significant performance benefits (see [benchmark results](problems.md) and [references](about.md#references) for more details).
+Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions to discrete optimization problems, but it can also prove the optimality and feasibility of these solutions.
+Unlike conventional MIP solvers, MIPLearn can take full advantage of very specific observations that happen to be true in a particular family of instances (such as the observation that a particular constraint is typically redundant, or that a particular variable typically assumes a certain value).
+
+For certain classes of problems, this approach has been shown to provide significant performance benefits (see [benchmarks](problems.md) and [references](about.md)).
 
 ### Features
 
@@ -16,7 +16,11 @@ from .components.lazy_static import StaticLazyConstraintsComponent
 from .components.cuts import UserCutsComponent
 from .components.primal import PrimalSolutionComponent
 from .components.relaxation import RelaxationComponent
+from .components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
+from .components.steps.relax_integrality import RelaxIntegralityStep
+from .components.steps.drop_redundant import DropRedundantInequalitiesStep
 
+from .classifiers import Classifier, Regressor
 from .classifiers.adaptive import AdaptiveClassifier
 from .classifiers.threshold import MinPrecisionThreshold
 
@@ -8,6 +8,7 @@ import pandas as pd
 import numpy as np
 import logging
 from tqdm.auto import tqdm
+import os
 
 from .solvers.learning import LearningSolver
 
@@ -61,6 +62,7 @@ class BenchmarkRunner:
         return self.results
 
     def save_results(self, filename):
+        os.makedirs(os.path.dirname(filename), exist_ok=True)
         self.results.to_csv(filename)
 
     def load_results(self, filename):
@@ -77,56 +79,36 @@ class BenchmarkRunner:
     def _push_result(self, result, solver, solver_name, instance):
         if self.results is None:
             self.results = pd.DataFrame(
+                # Show the following columns first in the CSV file
                 columns=[
                     "Solver",
                     "Instance",
-                    "Wallclock Time",
-                    "Lower Bound",
-                    "Upper Bound",
-                    "Gap",
-                    "Nodes",
-                    "Mode",
-                    "Sense",
-                    "Predicted LB",
-                    "Predicted UB",
                 ]
             )
 
         lb = result["Lower bound"]
         ub = result["Upper bound"]
-        gap = (ub - lb) / lb
-        if "Predicted LB" not in result:
-            result["Predicted LB"] = float("nan")
-            result["Predicted UB"] = float("nan")
-        self.results = self.results.append(
-            {
-                "Solver": solver_name,
-                "Instance": instance,
-                "Wallclock Time": result["Wallclock time"],
-                "Lower Bound": lb,
-                "Upper Bound": ub,
-                "Gap": gap,
-                "Nodes": result["Nodes"],
-                "Mode": solver.mode,
-                "Sense": result["Sense"],
-                "Predicted LB": result["Predicted LB"],
-                "Predicted UB": result["Predicted UB"],
-            },
-            ignore_index=True,
-        )
+        result["Solver"] = solver_name
+        result["Instance"] = instance
+        result["Gap"] = (ub - lb) / lb
+        result["Mode"] = solver.mode
+        self.results = self.results.append(pd.DataFrame([result]))
+
+        # Compute relative statistics
         groups = self.results.groupby("Instance")
-        best_lower_bound = groups["Lower Bound"].transform("max")
-        best_upper_bound = groups["Upper Bound"].transform("min")
+        best_lower_bound = groups["Lower bound"].transform("max")
+        best_upper_bound = groups["Upper bound"].transform("min")
         best_gap = groups["Gap"].transform("min")
         best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
-        best_wallclock_time = groups["Wallclock Time"].transform("min")
-        self.results["Relative Lower Bound"] = (
-            self.results["Lower Bound"] / best_lower_bound
+        best_wallclock_time = groups["Wallclock time"].transform("min")
+        self.results["Relative lower bound"] = (
+            self.results["Lower bound"] / best_lower_bound
         )
-        self.results["Relative Upper Bound"] = (
-            self.results["Upper Bound"] / best_upper_bound
+        self.results["Relative upper bound"] = (
+            self.results["Upper bound"] / best_upper_bound
         )
-        self.results["Relative Wallclock Time"] = (
-            self.results["Wallclock Time"] / best_wallclock_time
+        self.results["Relative wallclock time"] = (
+            self.results["Wallclock time"] / best_wallclock_time
         )
         self.results["Relative Gap"] = self.results["Gap"] / best_gap
         self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
@@ -143,12 +125,12 @@ class BenchmarkRunner:
 
         sense = results.loc[0, "Sense"]
         if sense == "min":
-            primal_column = "Relative Upper Bound"
-            obj_column = "Upper Bound"
+            primal_column = "Relative upper bound"
+            obj_column = "Upper bound"
             predicted_obj_column = "Predicted UB"
         else:
-            primal_column = "Relative Lower Bound"
-            obj_column = "Lower Bound"
+            primal_column = "Relative lower bound"
+            obj_column = "Lower bound"
             predicted_obj_column = "Predicted LB"
 
         fig, (ax1, ax2, ax3, ax4) = plt.subplots(
@@ -158,10 +140,10 @@ class BenchmarkRunner:
             gridspec_kw={"width_ratios": [2, 1, 1, 2]},
         )
 
-        # Figure 1: Solver x Wallclock Time
+        # Figure 1: Solver x Wallclock time
         sns.stripplot(
             x="Solver",
-            y="Wallclock Time",
+            y="Wallclock time",
             data=results,
             ax=ax1,
             jitter=0.25,
@@ -169,14 +151,14 @@ class BenchmarkRunner:
         )
         sns.barplot(
             x="Solver",
-            y="Wallclock Time",
+            y="Wallclock time",
             data=results,
             ax=ax1,
             errwidth=0.0,
             alpha=0.4,
             estimator=median,
         )
-        ax1.set(ylabel="Wallclock Time (s)")
+        ax1.set(ylabel="Wallclock time (s)")
 
         # Figure 2: Solver x Gap (%)
         ax2.set_ylim(-0.5, 5.5)
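The renamed result columns above feed directly into the CSV written by save_results. A minimal usage sketch follows; the BenchmarkRunner constructor arguments and the parallel_solve call are assumed from the MIPLearn 0.1 API and are not part of this commit.

# Hypothetical usage sketch; constructor and parallel_solve signatures are
# assumed from the MIPLearn 0.1 API, not shown in this diff.
from miplearn import BenchmarkRunner, LearningSolver

benchmark = BenchmarkRunner(
    {
        "baseline": LearningSolver(components=[]),
        "ml": LearningSolver(),
    }
)
benchmark.fit(train_instances)                    # train_instances: user-provided list
benchmark.parallel_solve(test_instances, n_jobs=4)
benchmark.save_results("results/benchmark.csv")   # parent directory is now created automatically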
@@ -3,16 +3,54 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 
-class Component:
+from abc import ABC, abstractmethod
+
+
+class Component(ABC):
     """
     A Component is an object which adds functionality to a LearningSolver.
+
+    For better code maintainability, LearningSolver simply delegates most of its
+    functionality to Components. Each Component is responsible for exactly one ML
+    strategy.
     """
 
     def before_solve(self, solver, instance, model):
         return
 
-    def after_solve(self, solver, instance, model, results):
-        return
+    @abstractmethod
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        training_data,
+    ):
+        """
+        Method called by LearningSolver after the problem is solved to optimality.
+
+        Parameters
+        ----------
+        solver: LearningSolver
+            The solver calling this method.
+        instance: Instance
+            The instance being solved.
+        model:
+            The concrete optimization model being solved.
+        stats: dict
+            A dictionary containing statistics about the solution process, such as
+            number of nodes explored and running time. Components are free to add their own
+            statistics here. For example, PrimalSolutionComponent adds statistics regarding
+            the number of predicted variables. All statistics in this dictionary are exported
+            to the benchmark CSV file.
+        training_data: dict
+            A dictionary containing data that may be useful for training machine learning
+            models and accelerating the solution process. Components are free to add their
+            own training data here. For example, PrimalSolutionComponent adds the current
+            primal solution. The data must be pickable.
+        """
+        pass
 
     def fit(self, training_instances):
         return
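To illustrate the new interface, here is a minimal sketch of a user-defined component; the class name and the statistics key are made-up examples, not part of the framework.

# Illustrative sketch of a component written against the new after_solve
# signature; "ExampleComponent" and its stats key are invented names.
from miplearn import Component


class ExampleComponent(Component):
    def before_solve(self, solver, instance, model):
        pass

    def after_solve(self, solver, instance, model, stats, training_data):
        # Both dictionaries can be freely extended by components: stats entries
        # end up in the benchmark CSV, training_data must remain picklable.
        stats["Example: solves"] = stats.get("Example: solves", 0) + 1
        training_data.setdefault("example_note", "solved")

    def fit(self, training_instances):
        pass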
@@ -25,9 +25,16 @@ class CompositeComponent(Component):
         for child in self.children:
             child.before_solve(solver, instance, model)
 
-    def after_solve(self, solver, instance, model, results):
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        training_data,
+    ):
         for child in self.children:
-            child.after_solve(solver, instance, model, results)
+            child.after_solve(solver, instance, model, stats, training_data)
 
     def fit(self, training_instances):
         for child in self.children:
@@ -40,7 +40,14 @@ class UserCutsComponent(Component):
             cut = instance.build_user_cut(model, v)
             solver.internal_solver.add_constraint(cut)
 
-    def after_solve(self, solver, instance, model, results):
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        results,
+        training_data,
+    ):
         pass
 
     def fit(self, training_instances):
@@ -52,7 +52,14 @@ class DynamicLazyConstraintsComponent(Component):
             solver.internal_solver.add_constraint(cut)
         return True
 
-    def after_solve(self, solver, instance, model, results):
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        training_data,
+    ):
         pass
 
     def fit(self, training_instances):
@@ -61,7 +68,7 @@ class DynamicLazyConstraintsComponent(Component):
 
         self.classifiers = {}
         violation_to_instance_idx = {}
-        for (idx, instance) in enumerate(training_instances):
+        for (idx, instance) in enumerate(InstanceIterator(training_instances)):
             for v in instance.found_violated_lazy_constraints:
                 if isinstance(v, list):
                     v = tuple(v)
@@ -49,7 +49,14 @@ class StaticLazyConstraintsComponent(Component):
         if instance.has_static_lazy_constraints():
             self._extract_and_predict_static(solver, instance)
 
-    def after_solve(self, solver, instance, model, results):
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        training_data,
+    ):
         pass
 
     def iteration_cb(self, solver, instance, model):
@@ -36,13 +36,20 @@ class ObjectiveValueComponent(Component):
         instance.predicted_lb = lb
         logger.info("Predicted values: lb=%.2f, ub=%.2f" % (lb, ub))
 
-    def after_solve(self, solver, instance, model, results):
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        training_data,
+    ):
         if self.ub_regressor is not None:
-            results["Predicted UB"] = instance.predicted_ub
-            results["Predicted LB"] = instance.predicted_lb
+            stats["Predicted UB"] = instance.predicted_ub
+            stats["Predicted LB"] = instance.predicted_lb
         else:
-            results["Predicted UB"] = None
-            results["Predicted LB"] = None
+            stats["Predicted UB"] = None
+            stats["Predicted LB"] = None
 
     def fit(self, training_instances):
         logger.debug("Extracting features...")
@@ -39,7 +39,14 @@ class PrimalSolutionComponent(Component):
         else:
             solver.internal_solver.set_warm_start(solution)
 
-    def after_solve(self, solver, instance, model, results):
+    def after_solve(
+        self,
+        solver,
+        instance,
+        model,
+        stats,
+        training_data,
+    ):
         pass
 
     def x(self, training_instances):
@@ -3,19 +3,13 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 import logging
-import sys
-import numpy as np
-
-from copy import deepcopy
-
-from tqdm import tqdm
 
 from miplearn import Component
 from miplearn.classifiers.counting import CountingClassifier
-from miplearn.components import classifier_evaluation_dict
 from miplearn.components.composite import CompositeComponent
-from miplearn.components.lazy_static import LazyConstraint
-from miplearn.extractors import InstanceIterator
+from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
+from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
+from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
 
 logger = logging.getLogger(__name__)
 
@@ -23,23 +17,26 @@ logger = logging.getLogger(__name__)
 class RelaxationComponent(Component):
     """
     A Component that tries to build a relaxation that is simultaneously strong and easy
-    to solve.
+    to solve. Currently, this component is composed by three steps:
 
-    Currently, this component performs the following operations:
-        - Drops all integrality constraints
-        - Drops all inequality constraints that are not likely to be binding.
+        - RelaxIntegralityStep
+        - DropRedundantInequalitiesStep
+        - ConvertTightIneqsIntoEqsStep
 
-    In future versions of MIPLearn, this component may keep some integrality constraints
-    and perform other operations.
-
     Parameters
     ----------
-    classifier : Classifier, optional
-        Classifier used to predict whether each constraint is binding or not. One deep
+    redundant_classifier : Classifier, optional
+        Classifier used to predict if a constraint is likely redundant. One deep
         copy of this classifier is made for each constraint category.
-    threshold : float, optional
-        If the probability that a constraint is binding exceeds this threshold, the
+    redundant_threshold : float, optional
+        If the probability that a constraint is redundant exceeds this threshold, the
         constraint is dropped from the linear relaxation.
+    tight_classifier : Classifier, optional
+        Classifier used to predict if a constraint is likely to be tight. One deep
+        copy of this classifier is made for each constraint category.
+    tight_threshold : float, optional
+        If the probability that a constraint is tight exceeds this threshold, the
+        constraint is converted into an equality constraint.
     slack_tolerance : float, optional
         If a constraint has slack greater than this threshold, then the constraint is
         considered loose. By default, this threshold equals a small positive number to
@@ -52,30 +49,37 @@ class RelaxationComponent(Component):
     violation_tolerance : float, optional
         If `check_dropped` is true, a constraint is considered satisfied during the
         check if its violation is smaller than this tolerance.
-    max_iterations : int
+    max_check_iterations : int
        If `check_dropped` is true, set the maximum number of iterations in the lazy
        constraint loop.
    """
 
     def __init__(
         self,
-        classifier=CountingClassifier(),
-        threshold=0.95,
+        redundant_classifier=CountingClassifier(),
+        redundant_threshold=0.95,
+        tight_classifier=CountingClassifier(),
+        tight_threshold=0.95,
         slack_tolerance=1e-5,
         check_dropped=False,
         violation_tolerance=1e-5,
-        max_iterations=3,
+        max_check_iterations=3,
     ):
         self.steps = [
             RelaxIntegralityStep(),
             DropRedundantInequalitiesStep(
-                classifier=classifier,
-                threshold=threshold,
+                classifier=redundant_classifier,
+                threshold=redundant_threshold,
                 slack_tolerance=slack_tolerance,
                 violation_tolerance=violation_tolerance,
-                max_iterations=max_iterations,
+                max_iterations=max_check_iterations,
                 check_dropped=check_dropped,
             ),
+            ConvertTightIneqsIntoEqsStep(
+                classifier=tight_classifier,
+                threshold=tight_threshold,
+                slack_tolerance=slack_tolerance,
+            ),
         ]
         self.composite = CompositeComponent(self.steps)
 
@@ -90,170 +94,3 @@ class RelaxationComponent(Component):
 
     def iteration_cb(self, solver, instance, model):
         return self.composite.iteration_cb(solver, instance, model)
-
-
-class RelaxIntegralityStep(Component):
-    def before_solve(self, solver, instance, _):
-        logger.info("Relaxing integrality...")
-        solver.internal_solver.relax()
-
-
-class DropRedundantInequalitiesStep(Component):
-    def __init__(
-        self,
-        classifier=CountingClassifier(),
-        threshold=0.95,
-        slack_tolerance=1e-5,
-        check_dropped=False,
-        violation_tolerance=1e-5,
-        max_iterations=3,
-    ):
-        self.classifiers = {}
-        self.classifier_prototype = classifier
-        self.threshold = threshold
-        self.slack_tolerance = slack_tolerance
-        self.pool = []
-        self.check_dropped = check_dropped
-        self.violation_tolerance = violation_tolerance
-        self.max_iterations = max_iterations
-        self.current_iteration = 0
-
-    def before_solve(self, solver, instance, _):
-        self.current_iteration = 0
-
-        logger.info("Predicting redundant LP constraints...")
-        cids = solver.internal_solver.get_constraint_ids()
-        x, constraints = self.x(
-            [instance],
-            constraint_ids=cids,
-            return_constraints=True,
-        )
-        y = self.predict(x)
-        for category in y.keys():
-            for i in range(len(y[category])):
-                if y[category][i][0] == 1:
-                    cid = constraints[category][i]
-                    c = LazyConstraint(
-                        cid=cid,
-                        obj=solver.internal_solver.extract_constraint(cid),
-                    )
-                    self.pool += [c]
-        logger.info("Extracted %d predicted constraints" % len(self.pool))
-
-    def after_solve(self, solver, instance, model, results):
-        instance.slacks = solver.internal_solver.get_constraint_slacks()
-
-    def fit(self, training_instances):
-        logger.debug("Extracting x and y...")
-        x = self.x(training_instances)
-        y = self.y(training_instances)
-        logger.debug("Fitting...")
-        for category in tqdm(x.keys(), desc="Fit (rlx:drop_ineq)"):
-            if category not in self.classifiers:
-                self.classifiers[category] = deepcopy(self.classifier_prototype)
-            self.classifiers[category].fit(x[category], y[category])
-
-    def x(self, instances, constraint_ids=None, return_constraints=False):
-        x = {}
-        constraints = {}
-        for instance in tqdm(
-            InstanceIterator(instances),
-            desc="Extract (rlx:drop_ineq:x)",
-            disable=len(instances) < 5,
-        ):
-            if constraint_ids is not None:
-                cids = constraint_ids
-            else:
-                cids = instance.slacks.keys()
-            for cid in cids:
-                category = instance.get_constraint_category(cid)
-                if category is None:
-                    continue
-                if category not in x:
-                    x[category] = []
-                    constraints[category] = []
-                x[category] += [instance.get_constraint_features(cid)]
-                constraints[category] += [cid]
-        if return_constraints:
-            return x, constraints
-        else:
-            return x
-
-    def y(self, instances):
-        y = {}
-        for instance in tqdm(
-            InstanceIterator(instances),
-            desc="Extract (rlx:drop_ineq:y)",
-            disable=len(instances) < 5,
-        ):
-            for (cid, slack) in instance.slacks.items():
-                category = instance.get_constraint_category(cid)
-                if category is None:
-                    continue
-                if category not in y:
-                    y[category] = []
-                if slack > self.slack_tolerance:
-                    y[category] += [[1]]
-                else:
-                    y[category] += [[0]]
-        return y
-
-    def predict(self, x):
-        y = {}
-        for (category, x_cat) in x.items():
-            if category not in self.classifiers:
-                continue
-            y[category] = []
-            # x_cat = np.array(x_cat)
-            proba = self.classifiers[category].predict_proba(x_cat)
-            for i in range(len(proba)):
-                if proba[i][1] >= self.threshold:
-                    y[category] += [[1]]
-                else:
-                    y[category] += [[0]]
-        return y
-
-    def evaluate(self, instance):
-        x = self.x([instance])
-        y_true = self.y([instance])
-        y_pred = self.predict(x)
-        tp, tn, fp, fn = 0, 0, 0, 0
-        for category in y_true.keys():
-            for i in range(len(y_true[category])):
-                if y_pred[category][i][0] == 1:
-                    if y_true[category][i][0] == 1:
-                        tp += 1
-                    else:
-                        fp += 1
-                else:
-                    if y_true[category][i][0] == 1:
-                        fn += 1
-                    else:
-                        tn += 1
-        return classifier_evaluation_dict(tp, tn, fp, fn)
-
-    def iteration_cb(self, solver, instance, model):
-        if not self.check_dropped:
-            return False
-        if self.current_iteration >= self.max_iterations:
-            return False
-        self.current_iteration += 1
-        logger.debug("Checking that dropped constraints are satisfied...")
-        constraints_to_add = []
-        for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(
-                c.obj,
-                self.violation_tolerance,
-            ):
-                constraints_to_add.append(c)
-        for c in constraints_to_add:
-            self.pool.remove(c)
-            solver.internal_solver.add_constraint(c.obj)
-        if len(constraints_to_add) > 0:
-            logger.info(
-                "%8d constraints %8d in the pool"
-                % (len(constraints_to_add), len(self.pool))
-            )
-            return True
-        else:
-            return False
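A configuration sketch using the renamed keyword arguments follows. CountingClassifier is simply the default prototype, shown explicitly to mark where custom classifiers plug in; the surrounding LearningSolver call follows the pattern used in the tests further down.

# Sketch: RelaxationComponent configured with the new redundant_*/tight_* parameters.
from miplearn import LearningSolver, RelaxationComponent
from miplearn.classifiers.counting import CountingClassifier

solver = LearningSolver(
    components=[
        RelaxationComponent(
            redundant_classifier=CountingClassifier(),
            redundant_threshold=0.95,
            tight_classifier=CountingClassifier(),
            tight_threshold=0.95,
            slack_tolerance=1e-5,
            check_dropped=True,
            max_check_iterations=3,
        ),
    ],
)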
miplearn/components/steps/__init__.py: new file, 0 lines

miplearn/components/steps/convert_tight.py: new file, 214 lines
@@ -0,0 +1,214 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from copy import deepcopy

import numpy as np
from tqdm import tqdm
import random

from ... import Component
from ...classifiers.counting import CountingClassifier
from ...components import classifier_evaluation_dict
from ...extractors import InstanceIterator
from .drop_redundant import DropRedundantInequalitiesStep

logger = logging.getLogger(__name__)


class ConvertTightIneqsIntoEqsStep(Component):
    """
    Component that predicts which inequality constraints are likely to be binding in
    the LP relaxation of the problem and converts them into equality constraints.

    This component always makes sure that the conversion process does not affect the
    feasibility of the problem. It can also, optionally, make sure that it does not affect
    the optimality, but this may be expensive.

    This component does not work on MIPs. All integrality constraints must be relaxed
    before this component is used.
    """

    def __init__(
        self,
        classifier=CountingClassifier(),
        threshold=0.95,
        slack_tolerance=0.0,
        check_optimality=False,
    ):
        self.classifiers = {}
        self.classifier_prototype = classifier
        self.threshold = threshold
        self.slack_tolerance = slack_tolerance
        self.check_optimality = check_optimality
        self.converted = []
        self.original_sense = {}

    def before_solve(self, solver, instance, _):
        logger.info("Predicting tight LP constraints...")
        x, constraints = DropRedundantInequalitiesStep._x_test(
            instance,
            constraint_ids=solver.internal_solver.get_constraint_ids(),
        )
        y = self.predict(x)

        self.n_converted = 0
        self.n_restored = 0
        self.n_kept = 0
        self.n_infeasible_iterations = 0
        self.n_suboptimal_iterations = 0
        for category in y.keys():
            for i in range(len(y[category])):
                if y[category][i][0] == 1:
                    cid = constraints[category][i]
                    s = solver.internal_solver.get_constraint_sense(cid)
                    self.original_sense[cid] = s
                    solver.internal_solver.set_constraint_sense(cid, "=")
                    self.converted += [cid]
                    self.n_converted += 1
                else:
                    self.n_kept += 1

        logger.info(f"Converted {self.n_converted} inequalities")

    def after_solve(
        self,
        solver,
        instance,
        model,
        stats,
        training_data,
    ):
        if "slacks" not in training_data.keys():
            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
        stats["ConvertTight: Kept"] = self.n_kept
        stats["ConvertTight: Converted"] = self.n_converted
        stats["ConvertTight: Restored"] = self.n_restored
        stats["ConvertTight: Inf iterations"] = self.n_infeasible_iterations
        stats["ConvertTight: Subopt iterations"] = self.n_suboptimal_iterations

    def fit(self, training_instances):
        logger.debug("Extracting x and y...")
        x = self.x(training_instances)
        y = self.y(training_instances)
        logger.debug("Fitting...")
        for category in tqdm(x.keys(), desc="Fit (rlx:conv_ineqs)"):
            if category not in self.classifiers:
                self.classifiers[category] = deepcopy(self.classifier_prototype)
            self.classifiers[category].fit(x[category], y[category])

    def x(self, instances):
        return DropRedundantInequalitiesStep._x_train(instances)

    def y(self, instances):
        y = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (rlx:conv_ineqs:y)",
            disable=len(instances) < 5,
        ):
            for (cid, slack) in instance.training_data[0]["slacks"].items():
                category = instance.get_constraint_category(cid)
                if category is None:
                    continue
                if category not in y:
                    y[category] = []
                if 0 <= slack <= self.slack_tolerance:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def predict(self, x):
        y = {}
        for (category, x_cat) in x.items():
            if category not in self.classifiers:
                continue
            y[category] = []
            x_cat = np.array(x_cat)
            proba = self.classifiers[category].predict_proba(x_cat)
            for i in range(len(proba)):
                if proba[i][1] >= self.threshold:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def evaluate(self, instance):
        x = self.x([instance])
        y_true = self.y([instance])
        y_pred = self.predict(x)
        tp, tn, fp, fn = 0, 0, 0, 0
        for category in y_true.keys():
            for i in range(len(y_true[category])):
                if y_pred[category][i][0] == 1:
                    if y_true[category][i][0] == 1:
                        tp += 1
                    else:
                        fp += 1
                else:
                    if y_true[category][i][0] == 1:
                        fn += 1
                    else:
                        tn += 1
        return classifier_evaluation_dict(tp, tn, fp, fn)

    def iteration_cb(self, solver, instance, model):
        is_infeasible, is_suboptimal = False, False
        restored = []

        def check_pi(msense, csense, pi):
            if csense == "=":
                return True
            if msense == "max":
                if csense == "<":
                    return pi >= 0
                else:
                    return pi <= 0
            else:
                if csense == ">":
                    return pi >= 0
                else:
                    return pi <= 0

        def restore(cid):
            nonlocal restored
            csense = self.original_sense[cid]
            solver.internal_solver.set_constraint_sense(cid, csense)
            restored += [cid]

        if solver.internal_solver.is_infeasible():
            for cid in self.converted:
                pi = solver.internal_solver.get_dual(cid)
                if abs(pi) > 0:
                    is_infeasible = True
                    restore(cid)
        elif self.check_optimality:
            random.shuffle(self.converted)
            n_restored = 0
            for cid in self.converted:
                if n_restored >= 100:
                    break
                pi = solver.internal_solver.get_dual(cid)
                csense = self.original_sense[cid]
                msense = solver.internal_solver.get_sense()
                if not check_pi(msense, csense, pi):
                    is_suboptimal = True
                    restore(cid)
                    n_restored += 1

        for cid in restored:
            self.converted.remove(cid)

        if len(restored) > 0:
            self.n_restored += len(restored)
            if is_infeasible:
                self.n_infeasible_iterations += 1
            if is_suboptimal:
                self.n_suboptimal_iterations += 1
            logger.info(f"Restored {len(restored)} inequalities")
            return True
        else:
            return False
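The step assumes a continuous relaxation, so it is paired with RelaxIntegralityStep; the sketch below mirrors test_convert_tight_usage further down in this commit.

# Relax integrality first, then convert predicted-tight inequalities into
# equalities on the re-solve (mirrors test_convert_tight_usage below).
from miplearn import LearningSolver, GurobiSolver
from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
from miplearn.problems.knapsack import GurobiKnapsackInstance

instance = GurobiKnapsackInstance(
    weights=[3.0, 5.0, 10.0],
    prices=[1.0, 1.0, 1.0],
    capacity=16.0,
)
solver = LearningSolver(
    solver=GurobiSolver(),
    components=[RelaxIntegralityStep(), ConvertTightIneqsIntoEqsStep()],
)
solver.solve(instance)          # first solve records inequality slacks as training data
solver.fit([instance])          # fit one classifier per constraint category
stats = solver.solve(instance)  # re-solve with predicted-tight rows converted to equalities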
miplearn/components/steps/drop_redundant.py: new file, 228 lines
@@ -0,0 +1,228 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from copy import deepcopy

import numpy as np
from tqdm import tqdm

from miplearn import Component
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from miplearn.components.lazy_static import LazyConstraint
from miplearn.extractors import InstanceIterator

logger = logging.getLogger(__name__)


class DropRedundantInequalitiesStep(Component):
    """
    Component that predicts which inequalities are likely loose in the LP and removes
    them. Optionally, double checks after the problem is solved that all dropped
    inequalities were in fact redundant, and, if not, re-adds them to the problem.

    This component does not work on MIPs. All integrality constraints must be relaxed
    before this component is used.
    """

    def __init__(
        self,
        classifier=CountingClassifier(),
        threshold=0.95,
        slack_tolerance=1e-5,
        check_feasibility=False,
        violation_tolerance=1e-5,
        max_iterations=3,
    ):
        self.classifiers = {}
        self.classifier_prototype = classifier
        self.threshold = threshold
        self.slack_tolerance = slack_tolerance
        self.pool = []
        self.check_feasibility = check_feasibility
        self.violation_tolerance = violation_tolerance
        self.max_iterations = max_iterations
        self.current_iteration = 0

    def before_solve(self, solver, instance, _):
        self.current_iteration = 0

        logger.info("Predicting redundant LP constraints...")
        x, constraints = self._x_test(
            instance,
            constraint_ids=solver.internal_solver.get_constraint_ids(),
        )
        y = self.predict(x)

        self.total_dropped = 0
        self.total_restored = 0
        self.total_kept = 0
        self.total_iterations = 0
        for category in y.keys():
            for i in range(len(y[category])):
                if y[category][i][0] == 1:
                    cid = constraints[category][i]
                    c = LazyConstraint(
                        cid=cid,
                        obj=solver.internal_solver.extract_constraint(cid),
                    )
                    self.pool += [c]
                    self.total_dropped += 1
                else:
                    self.total_kept += 1
        logger.info(f"Extracted {self.total_dropped} predicted constraints")

    def after_solve(
        self,
        solver,
        instance,
        model,
        stats,
        training_data,
    ):
        if "slacks" not in training_data.keys():
            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
        stats.update(
            {
                "DropRedundant: Kept": self.total_kept,
                "DropRedundant: Dropped": self.total_dropped,
                "DropRedundant: Restored": self.total_restored,
                "DropRedundant: Iterations": self.total_iterations,
            }
        )

    def fit(self, training_instances):
        logger.debug("Extracting x and y...")
        x = self.x(training_instances)
        y = self.y(training_instances)
        logger.debug("Fitting...")
        for category in tqdm(x.keys(), desc="Fit (rlx:drop_ineq)"):
            if category not in self.classifiers:
                self.classifiers[category] = deepcopy(self.classifier_prototype)
            self.classifiers[category].fit(x[category], y[category])

    @staticmethod
    def _x_test(instance, constraint_ids):
        x = {}
        constraints = {}
        cids = constraint_ids
        for cid in cids:
            category = instance.get_constraint_category(cid)
            if category is None:
                continue
            if category not in x:
                x[category] = []
                constraints[category] = []
            x[category] += [instance.get_constraint_features(cid)]
            constraints[category] += [cid]
        for category in x.keys():
            x[category] = np.array(x[category])
        return x, constraints

    @staticmethod
    def _x_train(instances):
        x = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (rlx:drop_ineq:x)",
            disable=len(instances) < 5,
        ):
            for training_data in instance.training_data:
                cids = training_data["slacks"].keys()
                for cid in cids:
                    category = instance.get_constraint_category(cid)
                    if category is None:
                        continue
                    if category not in x:
                        x[category] = []
                    x[category] += [instance.get_constraint_features(cid)]
        for category in x.keys():
            x[category] = np.array(x[category])
        return x

    def x(self, instances):
        return self._x_train(instances)

    def y(self, instances):
        y = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (rlx:drop_ineq:y)",
            disable=len(instances) < 5,
        ):
            for training_data in instance.training_data:
                for (cid, slack) in training_data["slacks"].items():
                    category = instance.get_constraint_category(cid)
                    if category is None:
                        continue
                    if category not in y:
                        y[category] = []
                    if slack > self.slack_tolerance:
                        y[category] += [[1]]
                    else:
                        y[category] += [[0]]
        return y

    def predict(self, x):
        y = {}
        for (category, x_cat) in x.items():
            if category not in self.classifiers:
                continue
            y[category] = []
            x_cat = np.array(x_cat)
            proba = self.classifiers[category].predict_proba(x_cat)
            for i in range(len(proba)):
                if proba[i][1] >= self.threshold:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def evaluate(self, instance):
        x = self.x([instance])
        y_true = self.y([instance])
        y_pred = self.predict(x)
        tp, tn, fp, fn = 0, 0, 0, 0
        for category in y_true.keys():
            for i in range(len(y_true[category])):
                if y_pred[category][i][0] == 1:
                    if y_true[category][i][0] == 1:
                        tp += 1
                    else:
                        fp += 1
                else:
                    if y_true[category][i][0] == 1:
                        fn += 1
                    else:
                        tn += 1
        return classifier_evaluation_dict(tp, tn, fp, fn)

    def iteration_cb(self, solver, instance, model):
        if not self.check_feasibility:
            return False
        if self.current_iteration >= self.max_iterations:
            return False
        self.current_iteration += 1
        logger.debug("Checking that dropped constraints are satisfied...")
        constraints_to_add = []
        for c in self.pool:
            if not solver.internal_solver.is_constraint_satisfied(
                c.obj,
                self.violation_tolerance,
            ):
                constraints_to_add.append(c)
        for c in constraints_to_add:
            self.pool.remove(c)
            solver.internal_solver.add_constraint(c.obj)
        if len(constraints_to_add) > 0:
            self.total_restored += len(constraints_to_add)
            logger.info(
                "%8d constraints %8d in the pool"
                % (len(constraints_to_add), len(self.pool))
            )
            self.total_iterations += 1
            return True
        else:
            return False
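Used on its own, the step follows the same relax / solve / fit / re-solve cycle; a brief sketch with the feasibility safety net enabled (keyword values shown are the defaults except check_feasibility, and a Gurobi-backed instance as in the tests is assumed).

# Sketch: DropRedundantInequalitiesStep with the feasibility check enabled.
# Dropped constraints that the relaxed solution violates are re-added in
# iteration_cb, for at most max_iterations rounds.
from miplearn import LearningSolver, GurobiSolver
from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep

step = DropRedundantInequalitiesStep(
    threshold=0.95,        # drop only when P(redundant) >= 0.95
    slack_tolerance=1e-5,  # training label: slack above this marks a constraint redundant
    check_feasibility=True,
    violation_tolerance=1e-5,
    max_iterations=3,
)
solver = LearningSolver(
    solver=GurobiSolver(),
    components=[RelaxIntegralityStep(), step],
)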
miplearn/components/steps/relax_integrality.py: new file, 29 lines
@@ -0,0 +1,29 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging

from miplearn import Component

logger = logging.getLogger(__name__)


class RelaxIntegralityStep(Component):
    """
    Component that relaxes all integrality constraints before the problem is solved.
    """

    def before_solve(self, solver, instance, _):
        logger.info("Relaxing integrality...")
        solver.internal_solver.relax()

    def after_solve(
        self,
        solver,
        instance,
        model,
        stats,
        training_data,
    ):
        return
miplearn/components/steps/tests/__init__.py: new file, 0 lines

miplearn/components/steps/tests/test_convert_tight.py: new file, 121 lines
@@ -0,0 +1,121 @@
from miplearn import LearningSolver, GurobiSolver, Instance, Classifier
from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
from miplearn.problems.knapsack import GurobiKnapsackInstance

from unittest.mock import Mock


def test_convert_tight_usage():
    instance = GurobiKnapsackInstance(
        weights=[3.0, 5.0, 10.0],
        prices=[1.0, 1.0, 1.0],
        capacity=16.0,
    )
    solver = LearningSolver(
        solver=GurobiSolver(),
        components=[
            RelaxIntegralityStep(),
            ConvertTightIneqsIntoEqsStep(),
        ],
    )

    # Solve original problem
    solver.solve(instance)
    original_upper_bound = instance.upper_bound

    # Should collect training data
    assert instance.training_data[0]["slacks"]["eq_capacity"] == 0.0

    # Fit and resolve
    solver.fit([instance])
    stats = solver.solve(instance)

    # Objective value should be the same
    assert instance.upper_bound == original_upper_bound
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 0


class TestInstance(Instance):
    def to_model(self):
        import gurobipy as grb
        from gurobipy import GRB

        m = grb.Model("model")
        x1 = m.addVar(name="x1")
        x2 = m.addVar(name="x2")
        m.setObjective(x1 + 2 * x2, grb.GRB.MAXIMIZE)
        m.addConstr(x1 <= 2, name="c1")
        m.addConstr(x2 <= 2, name="c2")
        m.addConstr(x1 + x2 <= 3, name="c2")
        return m


def test_convert_tight_infeasibility():
    comp = ConvertTightIneqsIntoEqsStep()
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[1, 0]])

    solver = LearningSolver(
        solver=GurobiSolver(params={}),
        components=[comp],
        solve_lp_first=False,
    )
    instance = TestInstance()
    stats = solver.solve(instance)
    assert instance.lower_bound == 5.0
    assert stats["ConvertTight: Inf iterations"] == 1
    assert stats["ConvertTight: Subopt iterations"] == 0


def test_convert_tight_suboptimality():
    comp = ConvertTightIneqsIntoEqsStep(check_optimality=True)
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[1, 0]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])

    solver = LearningSolver(
        solver=GurobiSolver(params={}),
        components=[comp],
        solve_lp_first=False,
    )
    instance = TestInstance()
    stats = solver.solve(instance)
    assert instance.lower_bound == 5.0
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 1


def test_convert_tight_optimal():
    comp = ConvertTightIneqsIntoEqsStep()
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[1, 0]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])

    solver = LearningSolver(
        solver=GurobiSolver(params={}),
        components=[comp],
        solve_lp_first=False,
    )
    instance = TestInstance()
    stats = solver.solve(instance)
    assert instance.lower_bound == 5.0
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 0
@@ -2,11 +2,21 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
+import numpy as np
 from unittest.mock import Mock, call
 
-from miplearn import RelaxationComponent, LearningSolver, Instance, InternalSolver
+from miplearn import (
+    LearningSolver,
+    Instance,
+    InternalSolver,
+    GurobiSolver,
+)
 from miplearn.classifiers import Classifier
-from miplearn.components.relaxation import DropRedundantInequalitiesStep
+from miplearn.components.relaxation import (
+    DropRedundantInequalitiesStep,
+    RelaxIntegralityStep,
+)
+from miplearn.problems.knapsack import GurobiKnapsackInstance
 
 
 def _setup():
@@ -14,7 +24,7 @@ def _setup():
 
     internal = solver.internal_solver = Mock(spec=InternalSolver)
     internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
-    internal.get_constraint_slacks = Mock(
+    internal.get_inequality_slacks = Mock(
         side_effect=lambda: {
             "c1": 0.5,
             "c2": 0.0,
@@ -28,9 +38,9 @@ def _setup():
|
|||||||
instance = Mock(spec=Instance)
|
instance = Mock(spec=Instance)
|
||||||
instance.get_constraint_features = Mock(
|
instance.get_constraint_features = Mock(
|
||||||
side_effect=lambda cid: {
|
side_effect=lambda cid: {
|
||||||
"c2": [1.0, 0.0],
|
"c2": np.array([1.0, 0.0]),
|
||||||
"c3": [0.5, 0.5],
|
"c3": np.array([0.5, 0.5]),
|
||||||
"c4": [1.0],
|
"c4": np.array([1.0]),
|
||||||
}[cid]
|
}[cid]
|
||||||
)
|
)
|
||||||
instance.get_constraint_category = Mock(
|
instance.get_constraint_category = Mock(
|
||||||
@@ -47,33 +57,33 @@ def _setup():
         "type-b": Mock(spec=Classifier),
     }
     classifiers["type-a"].predict_proba = Mock(
-        return_value=[
-            [0.20, 0.80],
-            [0.05, 0.95],
-        ]
+        return_value=np.array(
+            [
+                [0.20, 0.80],
+                [0.05, 0.95],
+            ]
+        )
     )
     classifiers["type-b"].predict_proba = Mock(
-        return_value=[
-            [0.02, 0.98],
-        ]
+        return_value=np.array(
+            [
+                [0.02, 0.98],
+            ]
+        )
     )
 
     return solver, internal, instance, classifiers
 
 
-def test_usage():
+def test_drop_redundant():
     solver, internal, instance, classifiers = _setup()
 
-    component = RelaxationComponent()
-    drop_ineqs_step = component.steps[1]
-    drop_ineqs_step.classifiers = classifiers
+    component = DropRedundantInequalitiesStep()
+    component.classifiers = classifiers
 
     # LearningSolver calls before_solve
     component.before_solve(solver, instance, None)
 
-    # Should relax integrality of the problem
-    internal.relax.assert_called_once()
-
     # Should query list of constraints
     internal.get_constraint_ids.assert_called_once()
 
@@ -99,10 +109,10 @@ def test_usage():
     )
 
     # Should ask ML to predict whether constraint should be removed
-    drop_ineqs_step.classifiers["type-a"].predict_proba.assert_called_once_with(
-        [[1.0, 0.0], [0.5, 0.5]]
-    )
-    drop_ineqs_step.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
+    type_a_actual = component.classifiers["type-a"].predict_proba.call_args[0][0]
+    type_b_actual = component.classifiers["type-b"].predict_proba.call_args[0][0]
+    np.testing.assert_array_equal(type_a_actual, np.array([[1.0, 0.0], [0.5, 0.5]]))
+    np.testing.assert_array_equal(type_b_actual, np.array([[1.0]]))
 
     # Should ask internal solver to remove constraints predicted as redundant
     assert internal.extract_constraint.call_count == 2
@@ -114,14 +124,14 @@ def test_usage():
     )
 
     # LearningSolver calls after_solve
-    component.after_solve(solver, instance, None, None)
+    training_data = {}
+    component.after_solve(solver, instance, None, {}, training_data)
 
-    # Should query slack for all constraints
-    internal.get_constraint_slacks.assert_called_once()
+    # Should query slack for all inequalities
+    internal.get_inequality_slacks.assert_called_once()
 
     # Should store constraint slacks in instance object
-    assert hasattr(instance, "slacks")
-    assert instance.slacks == {
+    assert training_data["slacks"] == {
         "c1": 0.5,
         "c2": 0.0,
         "c3": 0.0,
@@ -129,12 +139,14 @@ def test_usage():
     }
 
 
-def test_usage_with_check_dropped():
+def test_drop_redundant_with_check_feasibility():
    solver, internal, instance, classifiers = _setup()
 
-    component = RelaxationComponent(check_dropped=True, violation_tolerance=1e-3)
-    drop_ineqs_step = component.steps[1]
-    drop_ineqs_step.classifiers = classifiers
+    component = DropRedundantInequalitiesStep(
+        check_feasibility=True,
+        violation_tolerance=1e-3,
+    )
+    component.classifiers = classifiers
 
     # LearningSolver call before_solve
     component.before_solve(solver, instance, None)
@@ -179,23 +191,29 @@ def test_x_y_fit_predict_evaluate():
     }
     component.classifiers["type-a"].predict_proba = Mock(
         return_value=[
-            [0.20, 0.80],
+            np.array([0.20, 0.80]),
         ]
     )
     component.classifiers["type-b"].predict_proba = Mock(
-        return_value=[
-            [0.50, 0.50],
-            [0.05, 0.95],
-        ]
+        return_value=np.array(
+            [
+                [0.50, 0.50],
+                [0.05, 0.95],
+            ]
+        )
     )
 
     # First mock instance
-    instances[0].slacks = {
-        "c1": 0.00,
-        "c2": 0.05,
-        "c3": 0.00,
-        "c4": 30.0,
-    }
+    instances[0].training_data = [
+        {
+            "slacks": {
+                "c1": 0.00,
+                "c2": 0.05,
+                "c3": 0.00,
+                "c4": 30.0,
+            }
+        }
+    ]
     instances[0].get_constraint_category = Mock(
         side_effect=lambda cid: {
             "c1": None,
@@ -206,19 +224,23 @@ def test_x_y_fit_predict_evaluate():
     )
     instances[0].get_constraint_features = Mock(
         side_effect=lambda cid: {
-            "c2": [1.0, 0.0],
-            "c3": [0.5, 0.5],
-            "c4": [1.0],
+            "c2": np.array([1.0, 0.0]),
+            "c3": np.array([0.5, 0.5]),
+            "c4": np.array([1.0]),
         }[cid]
     )
 
     # Second mock instance
-    instances[1].slacks = {
-        "c1": 0.00,
-        "c3": 0.30,
-        "c4": 0.00,
-        "c5": 0.00,
-    }
+    instances[1].training_data = [
+        {
+            "slacks": {
+                "c1": 0.00,
+                "c3": 0.30,
+                "c4": 0.00,
+                "c5": 0.00,
+            }
+        }
+    ]
     instances[1].get_constraint_category = Mock(
         side_effect=lambda cid: {
             "c1": None,
@@ -229,32 +251,47 @@ def test_x_y_fit_predict_evaluate():
     )
     instances[1].get_constraint_features = Mock(
         side_effect=lambda cid: {
-            "c3": [0.3, 0.4],
-            "c4": [0.7],
-            "c5": [0.8],
+            "c3": np.array([0.3, 0.4]),
+            "c4": np.array([0.7]),
+            "c5": np.array([0.8]),
         }[cid]
     )
 
     expected_x = {
-        "type-a": [[1.0, 0.0], [0.5, 0.5], [0.3, 0.4]],
-        "type-b": [[1.0], [0.7], [0.8]],
+        "type-a": np.array(
+            [
+                [1.0, 0.0],
+                [0.5, 0.5],
+                [0.3, 0.4],
+            ]
+        ),
+        "type-b": np.array(
+            [
+                [1.0],
+                [0.7],
+                [0.8],
+            ]
+        ),
+    }
+    expected_y = {
+        "type-a": np.array([[0], [0], [1]]),
+        "type-b": np.array([[1], [0], [0]]),
     }
-    expected_y = {"type-a": [[0], [0], [1]], "type-b": [[1], [0], [0]]}
 
     # Should build X and Y matrices correctly
-    assert component.x(instances) == expected_x
-    assert component.y(instances) == expected_y
+    actual_x = component.x(instances)
+    actual_y = component.y(instances)
+    for category in ["type-a", "type-b"]:
+        np.testing.assert_array_equal(actual_x[category], expected_x[category])
+        np.testing.assert_array_equal(actual_y[category], expected_y[category])
 
     # Should pass along X and Y matrices to classifiers
     component.fit(instances)
-    component.classifiers["type-a"].fit.assert_called_with(
-        expected_x["type-a"],
-        expected_y["type-a"],
-    )
-    component.classifiers["type-b"].fit.assert_called_with(
-        expected_x["type-b"],
-        expected_y["type-b"],
-    )
+    for category in ["type-a", "type-b"]:
+        actual_x = component.classifiers[category].fit.call_args[0][0]
+        actual_y = component.classifiers[category].fit.call_args[0][1]
+        np.testing.assert_array_equal(actual_x, expected_x[category])
+        np.testing.assert_array_equal(actual_y, expected_y[category])
 
     assert component.predict(expected_x) == {"type-a": [[1]], "type-b": [[0], [1]]}
 
@@ -263,3 +300,71 @@ def test_x_y_fit_predict_evaluate():
     assert ev["True negative"] == 1
     assert ev["False positive"] == 1
     assert ev["False negative"] == 0
+
+
+def test_x_multiple_solves():
+    instance = Mock(spec=Instance)
+    instance.training_data = [
+        {
+            "slacks": {
+                "c1": 0.00,
+                "c2": 0.05,
+                "c3": 0.00,
+                "c4": 30.0,
+            }
+        },
+        {
+            "slacks": {
+                "c1": 0.00,
+                "c2": 0.00,
+                "c3": 1.00,
+                "c4": 0.0,
+            }
+        },
+    ]
+    instance.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": None,
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
+    instance.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": np.array([1.0, 0.0]),
+            "c3": np.array([0.5, 0.5]),
+            "c4": np.array([1.0]),
+        }[cid]
+    )
+
+    expected_x = {
+        "type-a": np.array(
+            [
+                [1.0, 0.0],
+                [0.5, 0.5],
+                [1.0, 0.0],
+                [0.5, 0.5],
+            ]
+        ),
+        "type-b": np.array(
+            [
+                [1.0],
+                [1.0],
+            ]
+        ),
+    }
+
+    expected_y = {
+        "type-a": np.array([[1], [0], [0], [1]]),
+        "type-b": np.array([[1], [0]]),
+    }
+
+    # Should build X and Y matrices correctly
+    component = DropRedundantInequalitiesStep()
+    actual_x = component.x([instance])
+    actual_y = component.y([instance])
+    print(actual_x)
+    for category in ["type-a", "type-b"]:
+        np.testing.assert_array_equal(actual_x[category], expected_x[category])
+        np.testing.assert_array_equal(actual_y[category], expected_y[category])
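The expected_x / expected_y dictionaries in the two tests above follow one pattern: features and labels are grouped by the category returned by get_constraint_category, with one row per (training sample, constraint) pair, and uncategorized constraints skipped. The following is a rough sketch of that bookkeeping, not the actual DropRedundantInequalitiesStep code; the redundancy rule is left as a caller-supplied predicate because the exact threshold is not shown in this diff.

import numpy as np


def build_xy(instances, is_redundant):
    # One feature row and one 0/1 label per (training sample, constraint),
    # keyed by constraint category; constraints with category None are skipped.
    x, y = {}, {}
    for instance in instances:
        for sample in instance.training_data:
            for cid, slack in sample["slacks"].items():
                category = instance.get_constraint_category(cid)
                if category is None:
                    continue
                x.setdefault(category, []).append(instance.get_constraint_features(cid))
                y.setdefault(category, []).append([1 if is_redundant(slack) else 0])
    return (
        {cat: np.vstack(rows) for cat, rows in x.items()},
        {cat: np.array(labels) for cat, labels in y.items()},
    )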
@@ -27,9 +27,9 @@ def test_composite():
     c2.before_solve.assert_has_calls([call(solver, instance, model)])
 
     # Should broadcast after_solve
-    cc.after_solve(solver, instance, model, {})
-    c1.after_solve.assert_has_calls([call(solver, instance, model, {})])
-    c2.after_solve.assert_has_calls([call(solver, instance, model, {})])
+    cc.after_solve(solver, instance, model, {}, {})
+    c1.after_solve.assert_has_calls([call(solver, instance, model, {}, {})])
+    c2.after_solve.assert_has_calls([call(solver, instance, model, {}, {})])
 
     # Should broadcast fit
     cc.fit([1, 2, 3])
@@ -28,13 +28,16 @@ class InstanceIterator:
         result = self.instances[self.current]
         self.current += 1
         if isinstance(result, str):
-            logger.info("Read: %s" % result)
-            if result.endswith(".gz"):
-                with gzip.GzipFile(result, "rb") as file:
-                    result = pickle.load(file)
-            else:
-                with open(result, "rb") as file:
-                    result = pickle.load(file)
+            logger.debug("Read: %s" % result)
+            try:
+                if result.endswith(".gz"):
+                    with gzip.GzipFile(result, "rb") as file:
+                        result = pickle.load(file)
+                else:
+                    with open(result, "rb") as file:
+                        result = pickle.load(file)
+            except pickle.UnpicklingError:
+                raise Exception(f"Invalid instance file: {result}")
         return result
 
 
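The try/except added above means a corrupted or non-pickle file now fails with a clear "Invalid instance file" message instead of a bare UnpicklingError. For reference, this is the kind of file the iterator expects; the paths and the toy object below are examples only.

import gzip
import pickle

instance = {"demo": True}  # stands in for any picklable Instance object
with gzip.GzipFile("/tmp/instance-0001.pkl.gz", "wb") as f:
    pickle.dump(instance, f)      # gzipped pickle, unpickled on demand
with open("/tmp/instance-0002.pkl", "wb") as f:
    pickle.dump(instance, f)      # a plain pickle works as well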
@@ -2,14 +2,14 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
+import networkx as nx
 import numpy as np
 import pyomo.environ as pe
-import networkx as nx
-from miplearn import Instance
-import random
-from scipy.stats import uniform, randint, bernoulli
+from scipy.stats import uniform, randint
 from scipy.stats.distributions import rv_frozen
 
+from miplearn import Instance
+
 
 class ChallengeA:
     def __init__(
@@ -5,6 +5,7 @@ import re
 import sys
 import logging
 from io import StringIO
+from random import randint
 
 from . import RedirectOutput
 from .internal import InternalSolver
@@ -33,6 +34,7 @@ class GurobiSolver(InternalSolver):
         """
         if params is None:
             params = {}
+        params["InfUnbdInfo"] = True
         from gurobipy import GRB
 
         self.GRB = GRB
@@ -84,16 +86,19 @@ class GurobiSolver(InternalSolver):
             self._bin_vars[name] = {}
         self._bin_vars[name][idx] = var
 
-    def _apply_params(self):
-        for (name, value) in self.params.items():
-            self.model.setParam(name, value)
+    def _apply_params(self, streams):
+        with RedirectOutput(streams):
+            for (name, value) in self.params.items():
+                self.model.setParam(name, value)
+            if "seed" not in [k.lower() for k in self.params.keys()]:
+                self.model.setParam("Seed", randint(0, 1_000_000))
 
     def solve_lp(self, tee=False):
         self._raise_if_callback()
-        self._apply_params()
         streams = [StringIO()]
         if tee:
             streams += [sys.stdout]
+        self._apply_params(streams)
         for (varname, vardict) in self._bin_vars.items():
             for (idx, var) in vardict.items():
                 var.vtype = self.GRB.CONTINUOUS
@@ -122,16 +127,15 @@ class GurobiSolver(InternalSolver):
 
         if lazy_cb:
             self.params["LazyConstraints"] = 1
-            self._apply_params()
         total_wallclock_time = 0
         total_nodes = 0
         streams = [StringIO()]
         if tee:
             streams += [sys.stdout]
+        self._apply_params(streams)
         if iteration_cb is None:
             iteration_cb = lambda: False
         while True:
-            logger.debug("Solving MIP...")
             with RedirectOutput(streams):
                 if lazy_cb is None:
                     self.model.optimize()
@@ -161,6 +165,12 @@ class GurobiSolver(InternalSolver):
             "Warm start value": self._extract_warm_start_value(log),
         }
 
+    def get_sense(self):
+        if self.model.modelSense == 1:
+            return "min"
+        else:
+            return "max"
+
     def get_solution(self):
         self._raise_if_callback()
 
@@ -175,6 +185,16 @@ class GurobiSolver(InternalSolver):
             var = self._all_vars[var_name][index]
         return self._get_value(var)
 
+    def is_infeasible(self):
+        return self.model.status in [self.GRB.INFEASIBLE, self.GRB.INF_OR_UNBD]
+
+    def get_dual(self, cid):
+        c = self.model.getConstrByName(cid)
+        if self.is_infeasible():
+            return c.farkasDual
+        else:
+            return c.pi
+
     def _get_value(self, var):
         if self.cb_where == self.GRB.Callback.MIPSOL:
             return self.model.cbGetSolution(var)
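The new get_dual/is_infeasible pair relies on Gurobi's InfUnbdInfo parameter (enabled in the constructor above): after an infeasible LP solve each constraint exposes a Farkas certificate component, while a feasible solve exposes the usual dual value. A standalone gurobipy illustration of those two attributes follows; the tiny model is made up purely for demonstration.

import gurobipy as gp
from gurobipy import GRB

m = gp.Model()
m.setParam("InfUnbdInfo", 1)           # required for FarkasDual to be populated
x = m.addVar(lb=0.0, name="x")
c1 = m.addConstr(x >= 2.0, name="c1")
c2 = m.addConstr(x <= 1.0, name="c2")  # c1 and c2 together are infeasible
m.setObjective(x, GRB.MINIMIZE)
m.optimize()

if m.status in (GRB.INFEASIBLE, GRB.INF_OR_UNBD):
    print(c1.FarkasDual, c2.FarkasDual)  # infeasibility certificate entries
else:
    print(c1.Pi, c2.Pi)                  # ordinary LP duals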
@@ -271,13 +291,18 @@ class GurobiSolver(InternalSolver):
         else:
             raise Exception("Unknown sense: %s" % sense)
 
-    def get_constraint_slacks(self):
-        return {c.ConstrName: c.Slack for c in self.model.getConstrs()}
+    def get_inequality_slacks(self):
+        ineqs = [c for c in self.model.getConstrs() if c.sense != "="]
+        return {c.ConstrName: c.Slack for c in ineqs}
 
     def set_constraint_sense(self, cid, sense):
         c = self.model.getConstrByName(cid)
         c.Sense = sense
 
+    def get_constraint_sense(self, cid):
+        c = self.model.getConstrByName(cid)
+        return c.Sense
+
     def set_constraint_rhs(self, cid, rhs):
         c = self.model.getConstrByName(cid)
         c.RHS = rhs
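get_inequality_slacks now reports slacks for inequality rows only, which is what both DropRedundantInequalitiesStep (large slack suggests a redundant constraint) and ConvertTightIneqsIntoEqsStep (near-zero slack suggests a tight one) consume. A small helper of the kind a component might use is sketched below; the tolerance is arbitrary and the function is illustrative, not part of MIPLearn.

def split_by_slack(solver, tol=1e-6):
    # `solver` is any InternalSolver; returns (tight_ids, loose_ids).
    slacks = solver.get_inequality_slacks()
    tight = [cid for cid, s in slacks.items() if abs(s) <= tol]
    loose = [cid for cid, s in slacks.items() if abs(s) > tol]
    return tight, loose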
@@ -184,13 +184,39 @@ class InternalSolver(ABC):
         pass
 
     @abstractmethod
-    def get_constraint_slacks(self):
+    def get_inequality_slacks(self):
         """
         Returns a dictionary mapping constraint name to the constraint slack
         in the current solution.
         """
         pass
 
+    @abstractmethod
+    def is_infeasible(self):
+        """
+        Returns True if the model has been proved to be infeasible.
+        Must be called after solve.
+        """
+        pass
+
+    @abstractmethod
+    def get_dual(self, cid):
+        """
+        If the model is feasible and has been solved to optimality, returns the optimal
+        value of the dual variable associated with this constraint. If the model is infeasible,
+        returns a portion of the infeasibility certificate corresponding to the given constraint.
+
+        Solve must be called prior to this method.
+        """
+        pass
+
+    @abstractmethod
+    def get_sense(self):
+        """
+        Returns the sense of the problem (either "min" or "max").
+        """
+        pass
+
     @abstractmethod
     def is_constraint_satisfied(self, cobj):
         pass
@@ -199,6 +225,10 @@ class InternalSolver(ABC):
     def set_constraint_sense(self, cid, sense):
         pass
 
+    @abstractmethod
+    def get_constraint_sense(self, cid):
+        pass
+
     @abstractmethod
     def set_constraint_rhs(self, cid, rhs):
         pass
@@ -11,7 +11,9 @@ import gzip
 from copy import deepcopy
 from typing import Optional, List
 from p_tqdm import p_map
+from tempfile import NamedTemporaryFile
 
+from . import RedirectOutput
 from .. import (
     ObjectiveValueComponent,
     PrimalSolutionComponent,
@@ -23,7 +25,6 @@ from .pyomo.gurobi import GurobiPyomoSolver
 
 logger = logging.getLogger(__name__)
 
-
 # Global memory for multiprocessing
 SOLVER = [None]  # type: List[Optional[LearningSolver]]
 INSTANCES = [None]  # type: List[Optional[dict]]
@@ -44,6 +45,52 @@ def _parallel_solve(idx):
 
 
 class LearningSolver:
+    """
+    Mixed-Integer Linear Programming (MIP) solver that extracts information
+    from previous runs and uses Machine Learning methods to accelerate the
+    solution of new (yet unseen) instances.
+
+    Parameters
+    ----------
+    components
+        Set of components in the solver. By default, includes:
+            - ObjectiveValueComponent
+            - PrimalSolutionComponent
+            - DynamicLazyConstraintsComponent
+            - UserCutsComponent
+    gap_tolerance
+        Relative MIP gap tolerance. By default, 1e-4.
+    mode
+        If "exact", solves problem to optimality, keeping all optimality
+        guarantees provided by the MIP solver. If "heuristic", uses machine
+        learning more aggressively, and may return suboptimal solutions.
+    solver
+        The internal MIP solver to use. Can be either "cplex", "gurobi", a
+        solver class such as GurobiSolver, or a solver instance such as
+        GurobiSolver().
+    threads
+        Maximum number of threads to use. If None, uses solver default.
+    time_limit
+        Maximum running time in seconds. If None, uses solver default.
+    node_limit
+        Maximum number of branch-and-bound nodes to explore. If None, uses
+        solver default.
+    use_lazy_cb
+        If True, uses lazy callbacks to enforce lazy constraints, instead of
+        a simple solver loop. This functionality may not supported by
+        all internal MIP solvers.
+    solve_lp_first: bool
+        If true, solve LP relaxation first, then solve original MILP. This
+        option should be activated if the LP relaxation is not very
+        expensive to solve and if it provides good hints for the integer
+        solution.
+    simulate_perfect: bool
+        If true, each call to solve actually performs three actions: solve
+        the original problem, train the ML models on the data that was just
+        collected, and solve the problem again. This is useful for evaluating
+        the theoretical performance of perfect ML models.
+    """
+
     def __init__(
         self,
         components=None,
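A short usage sketch based on the docstring above; the parameter values are illustrative and build_instance() is a placeholder for any code that creates a MIPLearn Instance.

from miplearn import LearningSolver, GurobiSolver

solver = LearningSolver(
    solver=GurobiSolver(),
    mode="exact",          # keep all MIP optimality guarantees
    time_limit=300,        # seconds; None uses the solver default
    solve_lp_first=True,   # solve the LP relaxation before the MILP
)
instance = build_instance()      # placeholder: any Instance subclass
solver.solve(instance)           # first solve collects training data
solver.fit([instance])           # train the ML components on that data
stats = solver.solve(instance)   # later solves benefit from the trained models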
@@ -55,47 +102,8 @@ class LearningSolver:
         node_limit=None,
         solve_lp_first=True,
         use_lazy_cb=False,
+        simulate_perfect=False,
     ):
-        """
-        Mixed-Integer Linear Programming (MIP) solver that extracts information
-        from previous runs and uses Machine Learning methods to accelerate the
-        solution of new (yet unseen) instances.
-
-        Parameters
-        ----------
-        components
-            Set of components in the solver. By default, includes:
-                - ObjectiveValueComponent
-                - PrimalSolutionComponent
-                - DynamicLazyConstraintsComponent
-                - UserCutsComponent
-        gap_tolerance
-            Relative MIP gap tolerance. By default, 1e-4.
-        mode
-            If "exact", solves problem to optimality, keeping all optimality
-            guarantees provided by the MIP solver. If "heuristic", uses machine
-            learning more agressively, and may return suboptimal solutions.
-        solver
-            The internal MIP solver to use. Can be either "cplex", "gurobi", a
-            solver class such as GurobiSolver, or a solver instance such as
-            GurobiSolver().
-        threads
-            Maximum number of threads to use. If None, uses solver default.
-        time_limit
-            Maximum running time in seconds. If None, uses solver default.
-        node_limit
-            Maximum number of branch-and-bound nodes to explore. If None, uses
-            solver default.
-        use_lazy_cb
-            If True, uses lazy callbacks to enforce lazy constraints, instead of
-            a simple solver loop. This functionality may not supported by
-            all internal MIP solvers.
-        solve_lp_first: bool
-            If true, solve LP relaxation first, then solve original MILP. This
-            option should be activated if the LP relaxation is not very
-            expensive to solve and if it provides good hints for the integer
-            solution.
-        """
         self.components = {}
         self.mode = mode
         self.internal_solver = None
@@ -107,6 +115,7 @@ class LearningSolver:
         self.node_limit = node_limit
         self.solve_lp_first = solve_lp_first
         self.use_lazy_cb = use_lazy_cb
+        self.simulate_perfect = simulate_perfect
 
         if components is not None:
             for comp in components:
@@ -202,7 +211,31 @@ class LearningSolver:
         "Predicted UB". See the documentation of each component for more
         details.
         """
+        if self.simulate_perfect:
+            if not isinstance(instance, str):
+                raise Exception("Not implemented")
+            with tempfile.NamedTemporaryFile(suffix=os.path.basename(instance)) as tmp:
+                self._solve(
+                    instance=instance,
+                    model=model,
+                    output=tmp.name,
+                    tee=tee,
+                )
+                self.fit([tmp.name])
+        return self._solve(
+            instance=instance,
+            model=model,
+            output=output,
+            tee=tee,
+        )
 
+    def _solve(
+        self,
+        instance,
+        model=None,
+        output="",
+        tee=False,
+    ):
         filename = None
         fileformat = None
         if isinstance(instance, str):
@@ -218,7 +251,8 @@ class LearningSolver:
                 instance = pickle.load(file)
 
         if model is None:
-            model = instance.to_model()
+            with RedirectOutput([]):
+                model = instance.to_model()
 
         self.tee = tee
         self.internal_solver = self._create_internal_solver()
@@ -253,22 +287,27 @@ class LearningSolver:
             lazy_cb = lazy_cb_wrapper
 
         logger.info("Solving MILP...")
-        results = self.internal_solver.solve(
+        stats = self.internal_solver.solve(
             tee=tee,
             iteration_cb=iteration_cb,
             lazy_cb=lazy_cb,
         )
-        results["LP value"] = instance.lp_value
+        stats["LP value"] = instance.lp_value
 
         # Read MIP solution and bounds
-        instance.lower_bound = results["Lower bound"]
-        instance.upper_bound = results["Upper bound"]
-        instance.solver_log = results["Log"]
+        instance.lower_bound = stats["Lower bound"]
+        instance.upper_bound = stats["Upper bound"]
+        instance.solver_log = stats["Log"]
         instance.solution = self.internal_solver.get_solution()
 
         logger.debug("Calling after_solve callbacks...")
+        training_data = {}
         for component in self.components.values():
-            component.after_solve(self, instance, model, results)
+            component.after_solve(self, instance, model, stats, training_data)
 
+        if not hasattr(instance, "training_data"):
+            instance.training_data = []
+        instance.training_data += [training_data]
+
         if filename is not None and output is not None:
             output_filename = output
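The training_data list added above is what the fit/x/y machinery in the component tests reads back: every call to solve() appends one dictionary of collected data (for example a "slacks" entry), so an instance solved twice contributes two samples, exactly as in test_x_multiple_solves. A trivial, self-contained illustration of the accumulation (the Toy class and values are made up):

class Toy:
    pass

instance = Toy()
for solve_round in range(2):
    training_data = {"slacks": {"c1": 0.0, "c2": 0.5}}  # filled in by after_solve hooks
    if not hasattr(instance, "training_data"):
        instance.training_data = []
    instance.training_data += [training_data]

assert len(instance.training_data) == 2  # one sample per solve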
@@ -282,7 +321,7 @@ class LearningSolver:
             with gzip.GzipFile(output_filename, "wb") as file:
                 pickle.dump(instance, file)
 
-        return results
+        return stats
 
     def parallel_solve(self, instances, n_jobs=4, label="Solve", output=[]):
         """
@@ -256,11 +256,23 @@ class BasePyomoSolver(InternalSolver):
     def relax(self):
         raise Exception("not implemented")
 
-    def get_constraint_slacks(self):
+    def get_inequality_slacks(self):
         raise Exception("not implemented")
 
     def set_constraint_sense(self, cid, sense):
         raise Exception("Not implemented")
 
+    def get_constraint_sense(self, cid):
+        raise Exception("Not implemented")
+
     def set_constraint_rhs(self, cid, rhs):
         raise Exception("Not implemented")
+
+    def is_infeasible(self):
+        raise Exception("Not implemented")
+
+    def get_dual(self, cid):
+        raise Exception("Not implemented")
+
+    def get_sense(self):
+        raise Exception("Not implemented")
@@ -7,8 +7,11 @@ import pickle
 import tempfile
 import os
 
-from miplearn import DynamicLazyConstraintsComponent
-from miplearn import LearningSolver
+from miplearn import (
+    LearningSolver,
+    GurobiSolver,
+    DynamicLazyConstraintsComponent,
+)
 
 from . import _get_instance, _get_internal_solvers
 
@@ -109,3 +112,18 @@ def test_solve_fit_from_disk():
         os.remove(filename)
     for filename in output:
         os.remove(filename)
+
+
+def test_simulate_perfect():
+    internal_solver = GurobiSolver()
+    instance = _get_instance(internal_solver)
+    with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as tmp:
+        pickle.dump(instance, tmp)
+        tmp.flush()
+    solver = LearningSolver(
+        solver=internal_solver,
+        simulate_perfect=True,
+    )
+
+    stats = solver.solve(tmp.name)
+    assert stats["Lower bound"] == stats["Predicted LB"]
@@ -27,11 +27,11 @@ def test_benchmark():
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.fit(train_instances)
     benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.raw_results().values.shape == (12, 16)
+    assert benchmark.raw_results().values.shape == (12, 19)
 
     benchmark.save_results("/tmp/benchmark.csv")
     assert os.path.isfile("/tmp/benchmark.csv")
 
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (12, 16)
+    assert benchmark.raw_results().values.shape == (12, 19)