Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)
Move python files to root folder; remove built docs
miplearn/__init__.py (new file, 30 lines)
@@ -0,0 +1,30 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .extractors import (SolutionExtractor,
                         InstanceFeaturesExtractor,
                         ObjectiveValueExtractor,
                         VariableFeaturesExtractor)

from .components.component import Component
from .components.objective import ObjectiveValueComponent
from .components.lazy import LazyConstraintsComponent
from .components.cuts import UserCutsComponent
from .components.primal import PrimalSolutionComponent

from .classifiers.adaptive import AdaptiveClassifier
from .classifiers.threshold import MinPrecisionThreshold

from .benchmark import BenchmarkRunner

from .instance import Instance

from .solvers.pyomo.base import BasePyomoSolver
from .solvers.pyomo.cplex import CplexPyomoSolver
from .solvers.pyomo.gurobi import GurobiPyomoSolver
from .solvers.guroby import GurobiSolver
from .solvers.internal import InternalSolver
from .solvers.learning import LearningSolver

from .log import setup_logger

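The import list above defines the package's public API. A minimal usage sketch follows; `MyInstance` is a hypothetical user-defined subclass of `miplearn.Instance`, and only the `solve`/`fit` methods exercised elsewhere in this commit are assumed:

from miplearn import LearningSolver

# MyInstance is a hypothetical subclass of miplearn.Instance.
training_instances = [MyInstance(seed) for seed in range(100)]
solver = LearningSolver()
for instance in training_instances:
    solver.solve(instance)        # solve and collect training data
solver.fit(training_instances)    # train the ML components
solver.solve(MyInstance(101))     # solve a new instance with ML assistance
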
miplearn/benchmark.py (new file, 182 lines)
@@ -0,0 +1,182 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from copy import deepcopy

import pandas as pd
from tqdm.auto import tqdm

from .solvers.learning import LearningSolver


class BenchmarkRunner:
    def __init__(self, solvers):
        assert isinstance(solvers, dict)
        for solver in solvers.values():
            assert isinstance(solver, LearningSolver)
        self.solvers = solvers
        self.results = None

    def solve(self, instances, tee=False):
        for (name, solver) in self.solvers.items():
            for i in tqdm(range(len(instances))):
                results = solver.solve(deepcopy(instances[i]), tee=tee)
                self._push_result(results, solver=solver, name=name, instance=i)

    def parallel_solve(self, instances, n_jobs=1, n_trials=1):
        instances = instances * n_trials
        for (name, solver) in self.solvers.items():
            results = solver.parallel_solve(instances,
                                            n_jobs=n_jobs,
                                            label="Solve (%s)" % name)
            for i in range(len(instances)):
                self._push_result(results[i],
                                  solver=solver,
                                  name=name,
                                  instance=i)

    def raw_results(self):
        return self.results

    def save_results(self, filename):
        self.results.to_csv(filename)

    def load_results(self, filename):
        self.results = pd.read_csv(filename, index_col=0)

    def load_state(self, filename):
        for (name, solver) in self.solvers.items():
            solver.load_state(filename)

    def fit(self, training_instances):
        for (name, solver) in self.solvers.items():
            solver.fit(training_instances)

    def _push_result(self, result, solver, name, instance):
        if self.results is None:
            self.results = pd.DataFrame(columns=["Solver",
                                                 "Instance",
                                                 "Wallclock Time",
                                                 "Lower Bound",
                                                 "Upper Bound",
                                                 "Gap",
                                                 "Nodes",
                                                 "Mode",
                                                 "Sense",
                                                 "Predicted LB",
                                                 "Predicted UB",
                                                 ])
        lb = result["Lower bound"]
        ub = result["Upper bound"]
        gap = (ub - lb) / lb
        if "Predicted LB" not in result:
            result["Predicted LB"] = float("nan")
            result["Predicted UB"] = float("nan")
        self.results = self.results.append({
            "Solver": name,
            "Instance": instance,
            "Wallclock Time": result["Wallclock time"],
            "Lower Bound": lb,
            "Upper Bound": ub,
            "Gap": gap,
            "Nodes": result["Nodes"],
            "Mode": solver.mode,
            "Sense": result["Sense"],
            "Predicted LB": result["Predicted LB"],
            "Predicted UB": result["Predicted UB"],
        }, ignore_index=True)
        groups = self.results.groupby("Instance")
        best_lower_bound = groups["Lower Bound"].transform("max")
        best_upper_bound = groups["Upper Bound"].transform("min")
        best_gap = groups["Gap"].transform("min")
        best_nodes = groups["Nodes"].transform("min")
        best_wallclock_time = groups["Wallclock Time"].transform("min")
        self.results["Relative Lower Bound"] = \
            self.results["Lower Bound"] / best_lower_bound
        self.results["Relative Upper Bound"] = \
            self.results["Upper Bound"] / best_upper_bound
        self.results["Relative Wallclock Time"] = \
            self.results["Wallclock Time"] / best_wallclock_time
        self.results["Relative Gap"] = \
            self.results["Gap"] / best_gap
        self.results["Relative Nodes"] = \
            self.results["Nodes"] / best_nodes

    def save_chart(self, filename):
        import matplotlib.pyplot as plt
        import seaborn as sns
        from numpy import median

        sns.set_style("whitegrid")
        sns.set_palette("Blues_r")
        results = self.raw_results()
        results["Gap (%)"] = results["Gap"] * 100.0

        sense = results.loc[0, "Sense"]
        if sense == "min":
            primal_column = "Relative Upper Bound"
            obj_column = "Upper Bound"
            predicted_obj_column = "Predicted UB"
        else:
            primal_column = "Relative Lower Bound"
            obj_column = "Lower Bound"
            predicted_obj_column = "Predicted LB"

        fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
                                                 ncols=4,
                                                 figsize=(12, 4),
                                                 gridspec_kw={'width_ratios': [2, 1, 1, 2]})

        # Figure 1: Solver x Wallclock Time
        sns.stripplot(x="Solver",
                      y="Wallclock Time",
                      data=results,
                      ax=ax1,
                      jitter=0.25,
                      size=4.0,
                      )
        sns.barplot(x="Solver",
                    y="Wallclock Time",
                    data=results,
                    ax=ax1,
                    errwidth=0.,
                    alpha=0.4,
                    estimator=median,
                    )
        ax1.set(ylabel='Wallclock Time (s)')

        # Figure 2: Solver x Gap (%)
        ax2.set_ylim(-0.5, 5.5)
        sns.stripplot(x="Solver",
                      y="Gap (%)",
                      jitter=0.25,
                      data=results[results["Mode"] != "heuristic"],
                      ax=ax2,
                      size=4.0,
                      )

        # Figure 3: Solver x Primal Value
        ax3.set_ylim(0.95, 1.05)
        sns.stripplot(x="Solver",
                      y=primal_column,
                      jitter=0.25,
                      data=results[results["Mode"] == "heuristic"],
                      ax=ax3,
                      )

        # Figure 4: Predicted vs Actual Objective Value
        sns.scatterplot(x=obj_column,
                        y=predicted_obj_column,
                        hue="Solver",
                        data=results[results["Mode"] != "heuristic"],
                        ax=ax4,
                        )
        xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc")
        ax4.set_xlim(xlim)
        ax4.set_ylim(ylim)
        ax4.get_legend().remove()

        fig.tight_layout()
        plt.savefig(filename, bbox_inches='tight', dpi=150)

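A short usage sketch for BenchmarkRunner, restricted to the methods defined above; the solver labels, configurations and instance lists are placeholders:

from miplearn import BenchmarkRunner, LearningSolver

# Dictionary keys become the "Solver" column in the results table.
runner = BenchmarkRunner({
    "baseline": LearningSolver(),
    "ml": LearningSolver(),  # placeholder configurations
})
runner.fit(training_instances)
runner.parallel_solve(test_instances, n_jobs=4, n_trials=2)
runner.save_results("results.csv")
runner.save_chart("results.png")
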
miplearn/classifiers/__init__.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import ABC, abstractmethod

import numpy as np


class Classifier(ABC):
    @abstractmethod
    def fit(self, x_train, y_train):
        pass

    @abstractmethod
    def predict_proba(self, x_test):
        pass

    def predict(self, x_test):
        proba = self.predict_proba(x_test)
        assert isinstance(proba, np.ndarray)
        assert proba.shape == (x_test.shape[0], 2)
        return (proba[:, 1] > 0.5).astype(float)


class Regressor(ABC):
    @abstractmethod
    def fit(self, x_train, y_train):
        pass

    @abstractmethod
    def predict(self, x_test):
        pass

miplearn/classifiers/adaptive.py (new file, 66 lines)
@@ -0,0 +1,66 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from copy import deepcopy

from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.evaluator import ClassifierEvaluator
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

logger = logging.getLogger(__name__)


class AdaptiveClassifier(Classifier):
    """
    A meta-classifier which dynamically selects what actual classifier to use
    based on its cross-validation score on a particular training data set.
    """

    def __init__(self,
                 candidates=None,
                 evaluator=ClassifierEvaluator()):
        """
        Initializes the meta-classifier.
        """
        if candidates is None:
            candidates = {
                "knn(100)": {
                    "classifier": KNeighborsClassifier(n_neighbors=100),
                    "min samples": 100,
                },
                "logistic": {
                    "classifier": make_pipeline(StandardScaler(),
                                                LogisticRegression()),
                    "min samples": 30,
                },
                "counting": {
                    "classifier": CountingClassifier(),
                    "min samples": 0,
                }
            }
        self.candidates = candidates
        self.evaluator = evaluator
        self.classifier = None

    def fit(self, x_train, y_train):
        best_name, best_clf, best_score = None, None, -float("inf")
        n_samples = x_train.shape[0]
        for (name, clf_dict) in self.candidates.items():
            if n_samples < clf_dict["min samples"]:
                continue
            clf = deepcopy(clf_dict["classifier"])
            clf.fit(x_train, y_train)
            score = self.evaluator.evaluate(clf, x_train, y_train)
            if score > best_score:
                best_name, best_clf, best_score = name, clf, score
        logger.debug("Best classifier: %s (score=%.3f)" % (best_name, best_score))
        self.classifier = best_clf

    def predict_proba(self, x_test):
        return self.classifier.predict_proba(x_test)

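The candidate pool can be replaced entirely; each entry maps a label to a prototype classifier plus the minimum number of training samples required to consider it. A sketch with a hypothetical custom pool (x_train, y_train and x_test are assumed numpy arrays):

from sklearn.tree import DecisionTreeClassifier
from miplearn.classifiers.adaptive import AdaptiveClassifier
from miplearn.classifiers.counting import CountingClassifier

clf = AdaptiveClassifier(candidates={
    "tree": {
        "classifier": DecisionTreeClassifier(max_depth=3),
        "min samples": 50,
    },
    "counting": {  # always-available fallback
        "classifier": CountingClassifier(),
        "min samples": 0,
    },
})
clf.fit(x_train, y_train)          # keeps the candidate with the best score
proba = clf.predict_proba(x_test)
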
miplearn/classifiers/counting.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.classifiers import Classifier
import numpy as np


class CountingClassifier(Classifier):
    """
    A classifier that generates constant predictions, based only on the
    frequency of the training labels. For example, if y_train is [1.0, 0.0, 0.0],
    this classifier always returns [0.66, 0.33] for any x_test. It essentially
    counts how many times each label appeared, hence the name.
    """

    def __init__(self):
        self.mean = None

    def fit(self, x_train, y_train):
        self.mean = np.mean(y_train)

    def predict_proba(self, x_test):
        return np.array([[1 - self.mean, self.mean]
                         for _ in range(x_test.shape[0])])

    def __repr__(self):
        return "CountingClassifier(mean=%s)" % self.mean

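A worked example of the behavior described in the docstring: with three 0-labels and five 1-labels the mean is 0.625, so every row of predict_proba is [0.375, 0.625], regardless of the features:

import numpy as np
from miplearn.classifiers.counting import CountingClassifier

clf = CountingClassifier()
clf.fit(np.zeros((8, 3)), [0., 0., 0., 1., 1., 1., 1., 1.])
print(clf.predict_proba(np.zeros((2, 3))))
# [[0.375 0.625]
#  [0.375 0.625]]
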
miplearn/classifiers/cv.py (new file, 71 lines)
@@ -0,0 +1,71 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from copy import deepcopy

import numpy as np
from miplearn.classifiers import Classifier
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

import logging
logger = logging.getLogger(__name__)


class CrossValidatedClassifier(Classifier):
    """
    A meta-classifier that, upon training, evaluates the performance of another
    classifier on the training data set using k-fold cross validation, then
    either adopts the other classifier if the cv score is high enough, or
    returns a constant label for every x_test otherwise.

    The threshold is specified in comparison to a dummy classifier trained
    on the same dataset. For example, a threshold of 0.0 indicates that any
    classifier as good as the dummy predictor is acceptable. A threshold of 1.0
    indicates that only classifiers with a perfect cross-validation score are
    acceptable. Other numbers are a linear interpolation of these two extremes.
    """

    def __init__(self,
                 classifier=LogisticRegression(),
                 threshold=0.75,
                 constant=0.0,
                 cv=5,
                 scoring='accuracy'):
        self.classifier = None
        self.classifier_prototype = classifier
        self.constant = constant
        self.threshold = threshold
        self.cv = cv
        self.scoring = scoring

    def fit(self, x_train, y_train):
        # Calculate dummy score and absolute score threshold
        y_train_avg = np.average(y_train)
        dummy_score = max(y_train_avg, 1 - y_train_avg)
        absolute_threshold = 1. * self.threshold + dummy_score * (1 - self.threshold)

        # Calculate cross validation score and decide which classifier to use
        clf = deepcopy(self.classifier_prototype)
        cv_score = float(np.mean(cross_val_score(clf,
                                                 x_train,
                                                 y_train,
                                                 cv=self.cv,
                                                 scoring=self.scoring)))
        if cv_score >= absolute_threshold:
            logger.debug("cv_score is above threshold (%.2f >= %.2f); keeping" %
                         (cv_score, absolute_threshold))
            self.classifier = clf
        else:
            logger.debug("cv_score is below threshold (%.2f < %.2f); discarding" %
                         (cv_score, absolute_threshold))
            self.classifier = DummyClassifier(strategy="constant",
                                              constant=self.constant)

        # Train chosen classifier
        self.classifier.fit(x_train, y_train)

    def predict_proba(self, x_test):
        return self.classifier.predict_proba(x_test)

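To make the interpolation concrete: if 60% of the training labels are positive, the dummy score is max(0.6, 0.4) = 0.6, and with threshold=0.75 the candidate must reach a cross-validation score of at least 1.0 * 0.75 + 0.6 * (1 - 0.75) = 0.90 before it is adopted; otherwise the constant DummyClassifier is used instead.
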
miplearn/classifiers/evaluator.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from sklearn.metrics import roc_auc_score


class ClassifierEvaluator:
    def __init__(self):
        pass

    def evaluate(self, clf, x_train, y_train):
        # FIXME: use cross-validation
        proba = clf.predict_proba(x_train)
        return roc_auc_score(y_train, proba[:, 1])

miplearn/classifiers/tests/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

miplearn/classifiers/tests/test_counting.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.classifiers.counting import CountingClassifier

import numpy as np
from numpy.linalg import norm

E = 0.1


def test_counting():
    clf = CountingClassifier()
    clf.fit(np.zeros((8, 25)), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    expected_proba = np.array([[0.375, 0.625],
                               [0.375, 0.625]])
    actual_proba = clf.predict_proba(np.zeros((2, 25)))
    assert norm(actual_proba - expected_proba) < E

miplearn/classifiers/tests/test_cv.py (new file, 46 lines)
@@ -0,0 +1,46 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from miplearn.classifiers.cv import CrossValidatedClassifier
from numpy.linalg import norm
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

E = 0.1


def test_cv():
    # Training set: label is true if point is inside a 2D circle
    x_train = np.array([[x1, x2]
                        for x1 in range(-10, 11)
                        for x2 in range(-10, 11)])
    x_train = StandardScaler().fit_transform(x_train)
    n_samples = x_train.shape[0]

    y_train = np.array([1.0 if x1 * x1 + x2 * x2 <= 100 else 0.0
                        for x1 in range(-10, 11)
                        for x2 in range(-10, 11)])

    # Support vector machines with linear kernels do not perform well on this
    # data set, so predictor should return the given constant.
    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
                                                  random_state=42),
                                   threshold=0.90,
                                   constant=0.0,
                                   cv=30)
    clf.fit(x_train, y_train)
    assert norm(np.zeros(n_samples) - clf.predict(x_train)) < E

    # Support vector machines with quadratic kernels perform almost perfectly
    # on this data set, so predictor should return their prediction.
    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
                                                  kernel='poly',
                                                  degree=2,
                                                  random_state=42),
                                   threshold=0.90,
                                   cv=30)
    clf.fit(x_train, y_train)
    print(y_train - clf.predict(x_train))
    assert norm(y_train - clf.predict(x_train)) < E

miplearn/classifiers/tests/test_evaluator.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from miplearn.classifiers.evaluator import ClassifierEvaluator
from sklearn.neighbors import KNeighborsClassifier


def test_evaluator():
    clf_a = KNeighborsClassifier(n_neighbors=1)
    clf_b = KNeighborsClassifier(n_neighbors=2)
    x_train = np.array([[0, 0], [1, 0]])
    y_train = np.array([0, 1])
    clf_a.fit(x_train, y_train)
    clf_b.fit(x_train, y_train)
    ev = ClassifierEvaluator()
    assert ev.evaluate(clf_a, x_train, y_train) == 1.0
    assert ev.evaluate(clf_b, x_train, y_train) == 0.5

miplearn/classifiers/tests/test_threshold.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import MinPrecisionThreshold


def test_threshold_dynamic():
    clf = Mock(spec=Classifier)
    clf.predict_proba = Mock(return_value=np.array([
        [0.10, 0.90],
        [0.10, 0.90],
        [0.20, 0.80],
        [0.30, 0.70],
    ]))
    x_train = np.array([0, 1, 2, 3])
    y_train = np.array([1, 1, 0, 0])

    threshold = MinPrecisionThreshold(min_precision=1.0)
    assert threshold.find(clf, x_train, y_train) == 0.90

    threshold = MinPrecisionThreshold(min_precision=0.65)
    assert threshold.find(clf, x_train, y_train) == 0.80

    threshold = MinPrecisionThreshold(min_precision=0.50)
    assert threshold.find(clf, x_train, y_train) == 0.70

    threshold = MinPrecisionThreshold(min_precision=0.00)
    assert threshold.find(clf, x_train, y_train) == 0.70

miplearn/classifiers/threshold.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import abstractmethod, ABC

import numpy as np
from sklearn.metrics._ranking import _binary_clf_curve


class DynamicThreshold(ABC):
    @abstractmethod
    def find(self, clf, x_train, y_train):
        """
        Given a trained binary classifier `clf` and a training data set,
        returns the numerical threshold (float) satisfying some criteria.
        """
        pass


class MinPrecisionThreshold(DynamicThreshold):
    """
    The smallest possible threshold satisfying a minimum acceptable
    precision (the fraction of predicted positives that are true positives).
    """

    def __init__(self, min_precision):
        self.min_precision = min_precision

    def find(self, clf, x_train, y_train):
        proba = clf.predict_proba(x_train)

        assert isinstance(proba, np.ndarray), \
            "classifier should return numpy array"
        assert proba.shape == (x_train.shape[0], 2), \
            "classifier should return (%d,%d)-shaped array, not %s" % (
                x_train.shape[0], 2, str(proba.shape))

        fps, tps, thresholds = _binary_clf_curve(y_train, proba[:, 1])
        precision = tps / (tps + fps)

        for k in reversed(range(len(precision))):
            if precision[k] >= self.min_precision:
                return thresholds[k]
        # No threshold attains the required precision; return a value above
        # any probability, so that nothing is predicted positive.
        return 2.0

miplearn/components/__init__.py (new file, 42 lines)
@@ -0,0 +1,42 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.


def classifier_evaluation_dict(tp, tn, fp, fn):
    p = tp + fn
    n = fp + tn
    d = {
        "Predicted positive": fp + tp,
        "Predicted negative": fn + tn,
        "Condition positive": p,
        "Condition negative": n,
        "True positive": tp,
        "True negative": tn,
        "False positive": fp,
        "False negative": fn,
        "Accuracy": (tp + tn) / (p + n),
        "F1 score": (2 * tp) / (2 * tp + fp + fn),
    }

    if p > 0:
        d["Recall"] = tp / p
    else:
        d["Recall"] = 1.0

    if tp + fp > 0:
        d["Precision"] = tp / (tp + fp)
    else:
        d["Precision"] = 1.0

    t = (p + n) / 100.0
    d["Predicted positive (%)"] = d["Predicted positive"] / t
    d["Predicted negative (%)"] = d["Predicted negative"] / t
    d["Condition positive (%)"] = d["Condition positive"] / t
    d["Condition negative (%)"] = d["Condition negative"] / t
    d["True positive (%)"] = d["True positive"] / t
    d["True negative (%)"] = d["True negative"] / t
    d["False positive (%)"] = d["False positive"] / t
    d["False negative (%)"] = d["False negative"] / t
    return d

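A quick worked example of the dictionary above: with tp=2, tn=1, fp=0, fn=1 there are p=3 condition positives, n=1 condition negatives, and t = (p + n) / 100 = 0.04:

from miplearn.components import classifier_evaluation_dict

d = classifier_evaluation_dict(tp=2, tn=1, fp=0, fn=1)
assert d["Accuracy"] == 0.75            # (2 + 1) / 4
assert d["F1 score"] == 0.8             # 4 / (4 + 0 + 1)
assert d["Precision"] == 1.0            # 2 / (2 + 0)
assert d["Recall"] == 2 / 3             # 2 / 3
assert d["True positive (%)"] == 50.0   # 2 / 0.04
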
miplearn/components/component.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import ABC, abstractmethod


class Component(ABC):
    """
    A Component is an object which adds functionality to a LearningSolver.
    """

    @abstractmethod
    def before_solve(self, solver, instance, model):
        pass

    @abstractmethod
    def after_solve(self, solver, instance, model, results):
        pass

    @abstractmethod
    def fit(self, training_instances):
        pass

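A minimal sketch of a user-defined component implementing this interface (the component below is illustrative, not part of the framework):

from miplearn.components.component import Component


class SolveLogComponent(Component):
    """Illustrative component that records the results of every solve."""

    def __init__(self):
        self.history = []

    def before_solve(self, solver, instance, model):
        pass

    def after_solve(self, solver, instance, model, results):
        self.history.append(results)

    def fit(self, training_instances):
        pass
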
miplearn/components/cuts.py (new file, 93 lines)
@@ -0,0 +1,93 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import sys
from copy import deepcopy

from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict

from .component import Component
from ..extractors import *  # also provides logging, np and tqdm used below

logger = logging.getLogger(__name__)


class UserCutsComponent(Component):
    """
    A component that predicts which user cuts to enforce.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.05):
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold
        self.classifier_prototype = classifier
        self.classifiers = {}

    def before_solve(self, solver, instance, model):
        logger.info("Predicting violated user cuts...")
        violations = self.predict(instance)
        logger.info("Enforcing %d cuts..." % len(violations))
        for v in violations:
            cut = instance.build_user_cut(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_solve(self, solver, instance, model, results):
        pass

    def fit(self, training_instances):
        logger.debug("Fitting...")
        features = InstanceFeaturesExtractor().extract(training_instances)

        self.classifiers = {}
        violation_to_instance_idx = {}
        for (idx, instance) in enumerate(training_instances):
            for v in instance.found_violated_user_cuts:
                if v not in self.classifiers:
                    self.classifiers[v] = deepcopy(self.classifier_prototype)
                    violation_to_instance_idx[v] = []
                violation_to_instance_idx[v] += [idx]

        for (v, classifier) in tqdm(self.classifiers.items(),
                                    desc="Fit (user cuts)",
                                    disable=not sys.stdout.isatty(),
                                    ):
            logger.debug("Training: %s" % (str(v)))
            label = np.zeros(len(training_instances))
            label[violation_to_instance_idx[v]] = 1.0
            classifier.fit(features, label)

    def predict(self, instance):
        violations = []
        features = InstanceFeaturesExtractor().extract([instance])
        for (v, classifier) in self.classifiers.items():
            proba = classifier.predict_proba(features)
            if proba[0][1] > self.threshold:
                violations += [v]
        return violations

    def evaluate(self, instances):
        results = {}
        all_violations = set()
        for instance in instances:
            all_violations |= set(instance.found_violated_user_cuts)
        for idx in tqdm(range(len(instances)),
                        desc="Evaluate (user cuts)",
                        disable=not sys.stdout.isatty(),
                        ):
            instance = instances[idx]
            condition_positive = set(instance.found_violated_user_cuts)
            condition_negative = all_violations - condition_positive
            pred_positive = set(self.predict(instance)) & all_violations
            pred_negative = all_violations - pred_positive
            tp = len(pred_positive & condition_positive)
            tn = len(pred_negative & condition_negative)
            fp = len(pred_positive & condition_negative)
            fn = len(pred_negative & condition_positive)
            results[idx] = classifier_evaluation_dict(tp, tn, fp, fn)
        return results

miplearn/components/lazy.py (new file, 95 lines)
@@ -0,0 +1,95 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import sys
from copy import deepcopy

from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict

from .component import Component
from ..extractors import *  # also provides logging, np and tqdm used below

logger = logging.getLogger(__name__)


class LazyConstraintsComponent(Component):
    """
    A component that predicts which lazy constraints to enforce.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.05):
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold
        self.classifier_prototype = classifier
        self.classifiers = {}

    def before_solve(self, solver, instance, model):
        logger.info("Predicting violated lazy constraints...")
        violations = self.predict(instance)
        logger.info("Enforcing %d constraints..." % len(violations))
        for v in violations:
            cut = instance.build_lazy_constraint(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_solve(self, solver, instance, model, results):
        pass

    def fit(self, training_instances):
        logger.debug("Fitting...")
        features = InstanceFeaturesExtractor().extract(training_instances)

        self.classifiers = {}
        violation_to_instance_idx = {}
        for (idx, instance) in enumerate(training_instances):
            for v in instance.found_violated_lazy_constraints:
                if isinstance(v, list):
                    v = tuple(v)
                if v not in self.classifiers:
                    self.classifiers[v] = deepcopy(self.classifier_prototype)
                    violation_to_instance_idx[v] = []
                violation_to_instance_idx[v] += [idx]

        for (v, classifier) in tqdm(self.classifiers.items(),
                                    desc="Fit (lazy)",
                                    disable=not sys.stdout.isatty(),
                                    ):
            logger.debug("Training: %s" % (str(v)))
            label = np.zeros(len(training_instances))
            label[violation_to_instance_idx[v]] = 1.0
            classifier.fit(features, label)

    def predict(self, instance):
        violations = []
        features = InstanceFeaturesExtractor().extract([instance])
        for (v, classifier) in self.classifiers.items():
            proba = classifier.predict_proba(features)
            if proba[0][1] > self.threshold:
                violations += [v]
        return violations

    def evaluate(self, instances):
        results = {}
        all_violations = set()
        for instance in instances:
            all_violations |= set(instance.found_violated_lazy_constraints)
        for idx in tqdm(range(len(instances)),
                        desc="Evaluate (lazy)",
                        disable=not sys.stdout.isatty(),
                        ):
            instance = instances[idx]
            condition_positive = set(instance.found_violated_lazy_constraints)
            condition_negative = all_violations - condition_positive
            pred_positive = set(self.predict(instance)) & all_violations
            pred_negative = all_violations - pred_positive
            tp = len(pred_positive & condition_positive)
            tn = len(pred_negative & condition_negative)
            fp = len(pred_positive & condition_negative)
            fn = len(pred_negative & condition_positive)
            results[idx] = classifier_evaluation_dict(tp, tn, fp, fn)
        return results

miplearn/components/objective.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from sklearn.metrics import (mean_squared_error, explained_variance_score,
                             max_error, mean_absolute_error,
                             median_absolute_error, r2_score)

from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
from sklearn.linear_model import LinearRegression
from copy import deepcopy
import numpy as np
import logging
logger = logging.getLogger(__name__)


class ObjectiveValueComponent(Component):
    """
    A Component which predicts the optimal objective value of the problem.
    """
    def __init__(self,
                 regressor=LinearRegression()):
        self.ub_regressor = None
        self.lb_regressor = None
        self.regressor_prototype = regressor

    def before_solve(self, solver, instance, model):
        if self.ub_regressor is not None:
            lb, ub = self.predict([instance])[0]
            instance.predicted_ub = ub
            instance.predicted_lb = lb
            logger.info("Predicted objective: [%.2f, %.2f]" % (lb, ub))

    def after_solve(self, solver, instance, model, results):
        if self.ub_regressor is not None:
            results["Predicted UB"] = instance.predicted_ub
            results["Predicted LB"] = instance.predicted_lb
        else:
            results["Predicted UB"] = None
            results["Predicted LB"] = None

    def fit(self, training_instances):
        logger.debug("Extracting features...")
        features = InstanceFeaturesExtractor().extract(training_instances)
        ub = ObjectiveValueExtractor(kind="upper bound").extract(training_instances)
        lb = ObjectiveValueExtractor(kind="lower bound").extract(training_instances)
        assert ub.shape == (len(training_instances), 1)
        assert lb.shape == (len(training_instances), 1)
        self.ub_regressor = deepcopy(self.regressor_prototype)
        self.lb_regressor = deepcopy(self.regressor_prototype)
        logger.debug("Fitting ub_regressor...")
        self.ub_regressor.fit(features, ub.ravel())
        logger.debug("Fitting lb_regressor...")
        self.lb_regressor.fit(features, lb.ravel())

    def predict(self, instances):
        features = InstanceFeaturesExtractor().extract(instances)
        lb = self.lb_regressor.predict(features)
        ub = self.ub_regressor.predict(features)
        assert lb.shape == (len(instances),)
        assert ub.shape == (len(instances),)
        return np.array([lb, ub]).T

    def evaluate(self, instances):
        y_pred = self.predict(instances)
        y_true = np.array([[inst.lower_bound, inst.upper_bound] for inst in instances])
        y_true_lb, y_true_ub = y_true[:, 0], y_true[:, 1]
        y_pred_lb, y_pred_ub = y_pred[:, 0], y_pred[:, 1]
        ev = {
            "Lower bound": {
                "Mean squared error": mean_squared_error(y_true_lb, y_pred_lb),
                "Explained variance": explained_variance_score(y_true_lb, y_pred_lb),
                "Max error": max_error(y_true_lb, y_pred_lb),
                "Mean absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
                "R2": r2_score(y_true_lb, y_pred_lb),
                "Median absolute error": median_absolute_error(y_true_lb, y_pred_lb),
            },
            "Upper bound": {
                "Mean squared error": mean_squared_error(y_true_ub, y_pred_ub),
                "Explained variance": explained_variance_score(y_true_ub, y_pred_ub),
                "Max error": max_error(y_true_ub, y_pred_ub),
                "Mean absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
                "R2": r2_score(y_true_ub, y_pred_ub),
                "Median absolute error": median_absolute_error(y_true_ub, y_pred_ub),
            },
        }
        return ev

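A usage sketch, assuming instances that have already been solved (so that lower_bound, upper_bound and the features consumed by the extractors are populated):

from miplearn import ObjectiveValueComponent

comp = ObjectiveValueComponent()
comp.fit(train_instances)                  # one regressor per bound
lb, ub = comp.predict([test_instance])[0]  # predicted [lower, upper] bounds
metrics = comp.evaluate(test_instances)    # per-bound regression metrics
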
miplearn/components/primal.py (new file, 147 lines)
@@ -0,0 +1,147 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from copy import deepcopy
import sys

from .component import Component
from ..classifiers.adaptive import AdaptiveClassifier
from ..classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
from ..components import classifier_evaluation_dict
from ..extractors import *  # also provides logging, np and tqdm used below

logger = logging.getLogger(__name__)


class PrimalSolutionComponent(Component):
    """
    A component that predicts primal solutions.
    """

    def __init__(self,
                 classifier=AdaptiveClassifier(),
                 mode="exact",
                 threshold=MinPrecisionThreshold(0.98)):
        self.mode = mode
        self.classifiers = {}
        self.thresholds = {}
        self.threshold_prototype = threshold
        self.classifier_prototype = classifier

    def before_solve(self, solver, instance, model):
        solution = self.predict(instance)
        if self.mode == "heuristic":
            solver.internal_solver.fix(solution)
        else:
            solver.internal_solver.set_warm_start(solution)

    def after_solve(self, solver, instance, model, results):
        pass

    def x(self, training_instances):
        return VariableFeaturesExtractor().extract(training_instances)

    def y(self, training_instances):
        return SolutionExtractor().extract(training_instances)

    def fit(self, training_instances, n_jobs=1):
        # Note: n_jobs is accepted but not used in this implementation.
        logger.debug("Extracting features...")
        features = VariableFeaturesExtractor().extract(training_instances)
        solutions = SolutionExtractor().extract(training_instances)

        for category in tqdm(features.keys(),
                             desc="Fit (primal)",
                             disable=not sys.stdout.isatty(),
                             ):
            x_train = features[category]
            for label in [0, 1]:
                y_train = solutions[category][:, label].astype(int)

                # If all samples are either positive or negative, make
                # constant predictions
                y_avg = np.average(y_train)
                if y_avg < 0.001 or y_avg >= 0.999:
                    self.classifiers[category, label] = round(y_avg)
                    self.thresholds[category, label] = 0.50
                    continue

                # Create a copy of classifier prototype and train it
                if isinstance(self.classifier_prototype, list):
                    clf = deepcopy(self.classifier_prototype[label])
                else:
                    clf = deepcopy(self.classifier_prototype)
                clf.fit(x_train, y_train)

                # Find threshold (dynamic or static)
                if isinstance(self.threshold_prototype, DynamicThreshold):
                    self.thresholds[category, label] = \
                        self.threshold_prototype.find(clf, x_train, y_train)
                else:
                    self.thresholds[category, label] = \
                        deepcopy(self.threshold_prototype)

                self.classifiers[category, label] = clf

    def predict(self, instance):
        solution = {}
        x_test = VariableFeaturesExtractor().extract([instance])
        var_split = Extractor.split_variables(instance)
        for category in var_split.keys():
            n = len(var_split[category])
            for (i, (var, index)) in enumerate(var_split[category]):
                if var not in solution.keys():
                    solution[var] = {}
                solution[var][index] = None
            for label in [0, 1]:
                if (category, label) not in self.classifiers.keys():
                    continue
                clf = self.classifiers[category, label]
                if isinstance(clf, float) or isinstance(clf, int):
                    # Constant prediction stored by fit()
                    ws = np.array([[1 - clf, clf] for _ in range(n)])
                else:
                    ws = clf.predict_proba(x_test[category])
                assert ws.shape == (n, 2), \
                    "ws.shape should be (%d, 2) not %s" % (n, ws.shape)
                for (i, (var, index)) in enumerate(var_split[category]):
                    if ws[i, 1] >= self.thresholds[category, label]:
                        solution[var][index] = label
        return solution

    def evaluate(self, instances):
        ev = {"Fix zero": {},
              "Fix one": {}}
        for instance_idx in tqdm(range(len(instances)),
                                 desc="Evaluate (primal)",
                                 disable=not sys.stdout.isatty(),
                                 ):
            instance = instances[instance_idx]
            solution_actual = instance.solution
            solution_pred = self.predict(instance)

            vars_all, vars_one, vars_zero = set(), set(), set()
            pred_one_positive, pred_zero_positive = set(), set()
            for (varname, var_dict) in solution_actual.items():
                for (idx, value) in var_dict.items():
                    vars_all.add((varname, idx))
                    if value > 0.5:
                        vars_one.add((varname, idx))
                    else:
                        vars_zero.add((varname, idx))
                    if solution_pred[varname][idx] is not None:
                        if solution_pred[varname][idx] > 0.5:
                            pred_one_positive.add((varname, idx))
                        else:
                            pred_zero_positive.add((varname, idx))
            pred_one_negative = vars_all - pred_one_positive
            pred_zero_negative = vars_all - pred_zero_positive

            tp_zero = len(pred_zero_positive & vars_zero)
            fp_zero = len(pred_zero_positive & vars_one)
            tn_zero = len(pred_zero_negative & vars_one)
            fn_zero = len(pred_zero_negative & vars_zero)

            tp_one = len(pred_one_positive & vars_one)
            fp_one = len(pred_one_positive & vars_zero)
            tn_one = len(pred_one_negative & vars_zero)
            fn_one = len(pred_one_negative & vars_one)

            ev["Fix zero"][instance_idx] = \
                classifier_evaluation_dict(tp_zero, tn_zero, fp_zero, fn_zero)
            ev["Fix one"][instance_idx] = \
                classifier_evaluation_dict(tp_one, tn_one, fp_one, fn_one)
        return ev

miplearn/components/tests/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

miplearn/components/tests/test_cuts.py (new file, 31 lines)
@@ -0,0 +1,31 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
import pyomo.environ as pe

from miplearn import Instance, GurobiPyomoSolver, LearningSolver
from miplearn.problems.knapsack import ChallengeA


class CutInstance(Instance):
    def to_model(self):
        model = pe.ConcreteModel()
        model.x = x = pe.Var([0, 1], domain=pe.Binary)
        model.OBJ = pe.Objective(expr=x[0] + x[1], sense=pe.maximize)
        model.eq = pe.Constraint(expr=2 * x[0] + 2 * x[1] <= 3)
        return model

    def get_instance_features(self):
        return np.zeros(0)

    def get_variable_features(self, var, index):
        return np.zeros(0)


def test_cut():
    challenge = ChallengeA()
    gurobi = GurobiPyomoSolver()
    solver = LearningSolver(solver=gurobi, time_limit=10)
    solver.solve(challenge.training_instances[0])
    # assert False

miplearn/components/tests/test_lazy.py (new file, 140 lines)
@@ -0,0 +1,140 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn import LazyConstraintsComponent, LearningSolver, InternalSolver
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances
from numpy.linalg import norm

E = 0.1


def test_lazy_fit():
    instances, models = get_test_pyomo_instances()
    instances[0].found_violated_lazy_constraints = ["a", "b"]
    instances[1].found_violated_lazy_constraints = ["b", "c"]
    classifier = Mock(spec=Classifier)
    component = LazyConstraintsComponent(classifier=classifier)

    component.fit(instances)

    # Should create one classifier for each violation
    assert "a" in component.classifiers
    assert "b" in component.classifiers
    assert "c" in component.classifiers

    # Should provide correct x_train to each classifier
    expected_x_train_a = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
    expected_x_train_b = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
    expected_x_train_c = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
    actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
    actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
    actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
    assert norm(expected_x_train_a - actual_x_train_a) < E
    assert norm(expected_x_train_b - actual_x_train_b) < E
    assert norm(expected_x_train_c - actual_x_train_c) < E

    # Should provide correct y_train to each classifier
    expected_y_train_a = np.array([1.0, 0.0])
    expected_y_train_b = np.array([1.0, 1.0])
    expected_y_train_c = np.array([0.0, 1.0])
    actual_y_train_a = component.classifiers["a"].fit.call_args[0][1]
    actual_y_train_b = component.classifiers["b"].fit.call_args[0][1]
    actual_y_train_c = component.classifiers["c"].fit.call_args[0][1]
    assert norm(expected_y_train_a - actual_y_train_a) < E
    assert norm(expected_y_train_b - actual_y_train_b) < E
    assert norm(expected_y_train_c - actual_y_train_c) < E


def test_lazy_before():
    instances, models = get_test_pyomo_instances()
    instances[0].build_lazy_constraint = Mock(return_value="c1")
    solver = LearningSolver()
    solver.internal_solver = Mock(spec=InternalSolver)
    component = LazyConstraintsComponent(threshold=0.10)
    component.classifiers = {"a": Mock(spec=Classifier),
                             "b": Mock(spec=Classifier)}
    component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])

    component.before_solve(solver, instances[0], models[0])

    # Should ask classifier likelihood of each constraint being violated
    expected_x_test_a = np.array([[67., 21.75, 1287.92]])
    expected_x_test_b = np.array([[67., 21.75, 1287.92]])
    actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
    actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
    assert norm(expected_x_test_a - actual_x_test_a) < E
    assert norm(expected_x_test_b - actual_x_test_b) < E

    # Should ask instance to generate cut for constraints whose likelihood
    # of being violated exceeds the threshold
    instances[0].build_lazy_constraint.assert_called_once_with(models[0], "b")

    # Should ask internal solver to add generated constraint
    solver.internal_solver.add_constraint.assert_called_once_with("c1")


def test_lazy_evaluate():
    instances, models = get_test_pyomo_instances()
    component = LazyConstraintsComponent()
    component.classifiers = {"a": Mock(spec=Classifier),
                             "b": Mock(spec=Classifier),
                             "c": Mock(spec=Classifier)}
    component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
    component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])

    instances[0].found_violated_lazy_constraints = ["a", "b", "c"]
    instances[1].found_violated_lazy_constraints = ["b", "d"]
    assert component.evaluate(instances) == {
        0: {
            "Accuracy": 0.75,
            "F1 score": 0.8,
            "Precision": 1.0,
            "Recall": 2 / 3.,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 3,
            "Condition negative": 1,
            "False negative": 1,
            "False positive": 0,
            "True negative": 1,
            "True positive": 2,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 75.0,
            "Condition negative (%)": 25.0,
            "False negative (%)": 25.0,
            "False positive (%)": 0.0,
            "True negative (%)": 25.0,
            "True positive (%)": 50.0,
        },
        1: {
            "Accuracy": 0.5,
            "F1 score": 0.5,
            "Precision": 0.5,
            "Recall": 0.5,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 2,
            "Condition negative": 2,
            "False negative": 1,
            "False positive": 1,
            "True negative": 1,
            "True positive": 1,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 50.0,
            "Condition negative (%)": 50.0,
            "False negative (%)": 25.0,
            "False positive (%)": 25.0,
            "True negative (%)": 25.0,
            "True positive (%)": 25.0,
        }
    }

miplearn/components/tests/test_objective.py (new file, 47 lines)
@@ -0,0 +1,47 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn import ObjectiveValueComponent
from miplearn.classifiers import Regressor
from miplearn.tests import get_test_pyomo_instances


def test_usage():
    instances, models = get_test_pyomo_instances()
    comp = ObjectiveValueComponent()
    comp.fit(instances)
    assert instances[0].lower_bound == 1183.0
    assert instances[0].upper_bound == 1183.0
    assert np.round(comp.predict(instances), 2).tolist() == [[1183.0, 1183.0],
                                                             [1070.0, 1070.0]]


def test_obj_evaluate():
    instances, models = get_test_pyomo_instances()
    reg = Mock(spec=Regressor)
    reg.predict = Mock(return_value=np.array([1000.0, 1000.0]))
    comp = ObjectiveValueComponent(regressor=reg)
    comp.fit(instances)
    ev = comp.evaluate(instances)
    assert ev == {
        'Lower bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        },
        'Upper bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        }
    }

miplearn/components/tests/test_primal.py (new file, 99 lines)
@@ -0,0 +1,99 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn import PrimalSolutionComponent
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances


def test_predict():
    instances, models = get_test_pyomo_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances)
    solution = comp.predict(instances[0])
    assert "x" in solution
    assert 0 in solution["x"]
    assert 1 in solution["x"]
    assert 2 in solution["x"]
    assert 3 in solution["x"]


def test_evaluate():
    instances, models = get_test_pyomo_instances()
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(return_value=np.array([
        [0., 1.],  # x[0]
        [0., 1.],  # x[1]
        [1., 0.],  # x[2]
        [1., 0.],  # x[3]
    ]))
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(return_value=np.array([
        [1., 0.],  # x[0] instances[0]
        [1., 0.],  # x[1] instances[0]
        [0., 1.],  # x[2] instances[0]
        [1., 0.],  # x[3] instances[0]
    ]))
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
                                   threshold=0.50)
    comp.fit(instances[:1])
    assert comp.predict(instances[0]) == {"x": {0: 0,
                                                1: 0,
                                                2: 1,
                                                3: None}}
    assert instances[0].solution == {"x": {0: 1,
                                           1: 0,
                                           2: 1,
                                           3: 1}}
    ev = comp.evaluate(instances[:1])
    assert ev == {'Fix one': {0: {'Accuracy': 0.5,
                                  'Condition negative': 1,
                                  'Condition negative (%)': 25.0,
                                  'Condition positive': 3,
                                  'Condition positive (%)': 75.0,
                                  'F1 score': 0.5,
                                  'False negative': 2,
                                  'False negative (%)': 50.0,
                                  'False positive': 0,
                                  'False positive (%)': 0.0,
                                  'Precision': 1.0,
                                  'Predicted negative': 3,
                                  'Predicted negative (%)': 75.0,
                                  'Predicted positive': 1,
                                  'Predicted positive (%)': 25.0,
                                  'Recall': 0.3333333333333333,
                                  'True negative': 1,
                                  'True negative (%)': 25.0,
                                  'True positive': 1,
                                  'True positive (%)': 25.0}},
                  'Fix zero': {0: {'Accuracy': 0.75,
                                   'Condition negative': 3,
                                   'Condition negative (%)': 75.0,
                                   'Condition positive': 1,
                                   'Condition positive (%)': 25.0,
                                   'F1 score': 0.6666666666666666,
                                   'False negative': 0,
                                   'False negative (%)': 0.0,
                                   'False positive': 1,
                                   'False positive (%)': 25.0,
                                   'Precision': 0.5,
                                   'Predicted negative': 2,
                                   'Predicted negative (%)': 50.0,
                                   'Predicted positive': 2,
                                   'Predicted positive (%)': 50.0,
                                   'Recall': 1.0,
                                   'True negative': 2,
                                   'True negative (%)': 50.0,
                                   'True positive': 1,
                                   'True positive (%)': 25.0}}}


def test_primal_parallel_fit():
    instances, models = get_test_pyomo_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances, n_jobs=2)
    assert len(comp.classifiers) == 2

105
miplearn/extractors.py
Normal file
@@ -0,0 +1,105 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from abc import ABC, abstractmethod

import numpy as np
from tqdm import tqdm

logger = logging.getLogger(__name__)


class Extractor(ABC):
    @abstractmethod
    def extract(self, instances):
        pass

    @staticmethod
    def split_variables(instance):
        assert hasattr(instance, "lp_solution")
        result = {}
        for var_name in instance.lp_solution:
            for index in instance.lp_solution[var_name]:
                category = instance.get_variable_category(var_name, index)
                if category is None:
                    continue
                if category not in result:
                    result[category] = []
                result[category] += [(var_name, index)]
        return result


class VariableFeaturesExtractor(Extractor):
    def extract(self, instances):
        result = {}
        for instance in tqdm(instances,
                             desc="Extract (vars)",
                             disable=len(instances) < 5):
            instance_features = instance.get_instance_features()
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
                if category not in result:
                    result[category] = []
                for (var_name, index) in var_index_pairs:
                    result[category] += [
                        instance_features.tolist() +
                        instance.get_variable_features(var_name, index).tolist() +
                        [instance.lp_solution[var_name][index]]
                    ]
        for category in result:
            result[category] = np.array(result[category])
        return result


class SolutionExtractor(Extractor):
    def __init__(self, relaxation=False):
        self.relaxation = relaxation

    def extract(self, instances):
        result = {}
        for instance in tqdm(instances,
                             desc="Extract (solution)",
                             disable=len(instances) < 5):
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
                if category not in result:
                    result[category] = []
                for (var_name, index) in var_index_pairs:
                    if self.relaxation:
                        v = instance.lp_solution[var_name][index]
                    else:
                        v = instance.solution[var_name][index]
                    if v is None:
                        result[category] += [[0, 0]]
                    else:
                        result[category] += [[1 - v, v]]
        for category in result:
            result[category] = np.array(result[category])
        return result


class InstanceFeaturesExtractor(Extractor):
    def extract(self, instances):
        return np.vstack([
            np.hstack([
                instance.get_instance_features(),
                instance.lp_value,
            ])
            for instance in instances
        ])


class ObjectiveValueExtractor(Extractor):
    def __init__(self, kind="lp"):
        assert kind in ["lower bound", "upper bound", "lp"]
        self.kind = kind

    def extract(self, instances):
        if self.kind == "lower bound":
            return np.array([[instance.lower_bound] for instance in instances])
        if self.kind == "upper bound":
            return np.array([[instance.upper_bound] for instance in instances])
        if self.kind == "lp":
            return np.array([[instance.lp_value] for instance in instances])
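

# Illustrative sketch (not part of the original file): how the extractors
# above are typically combined to build training data. Assumes each instance
# has already been solved, so `lp_solution` and `solution` are populated.
def build_training_data(instances):
    # One feature matrix and one label matrix per variable category.
    x_train = VariableFeaturesExtractor().extract(instances)
    y_train = SolutionExtractor().extract(instances)
    # y_train[category][k] is the one-hot pair [1 - v, v], where v is the
    # optimal value of the k-th (variable, index) pair in that category.
    return x_train, y_train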
131
miplearn/instance.py
Normal file
@@ -0,0 +1,131 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import ABC, abstractmethod
import gzip
import json


class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the problem.

    In the knapsack problem, for example, this class could hold the number of items, their weights
    and costs, as well as the size of the knapsack. Objects implementing this class are able to
    convert themselves into a concrete optimization model, which can be optimized by a solver, or
    into arrays of features, which can be provided as inputs to machine learning models.
    """

    @abstractmethod
    def to_model(self):
        """
        Returns a concrete Pyomo model corresponding to this instance.
        """
        pass

    @abstractmethod
    def get_instance_features(self):
        """
        Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.

        The array is used by LearningSolver to determine how similar two instances are. It may also
        be used to predict, in combination with variable-specific features, the values of binary
        decision variables in the problem.

        There is not necessarily a one-to-one correspondence between models and instance features:
        the features may encode only part of the data necessary to generate the complete model.
        Features may also be statistics computed from the original data. For example, in the
        knapsack problem, an implementation may decide to provide as instance features only
        the average weights, average prices, number of items and the size of the knapsack.

        The returned array MUST have the same length for all relevant instances of the problem. If
        two instances map into arrays of different lengths, they cannot be solved by the same
        LearningSolver object.
        """
        pass

    @abstractmethod
    def get_variable_features(self, var, index):
        """
        Returns a 1-dimensional array of (numerical) features describing a particular decision
        variable.

        The argument `var` is a pyomo.core.Var object, which represents a collection of decision
        variables. The argument `index` specifies which variable in the collection is the relevant
        one.

        In combination with instance features, variable features are used by LearningSolver to
        predict, among other things, the optimal value of each decision variable before the
        optimization takes place. In the knapsack problem, for example, an implementation could
        provide as variable features the weight and the price of a specific item.

        Like instance features, the arrays returned by this method MUST have the same length for
        all variables within the same category, for all relevant instances of the problem.
        """
        pass

    def get_variable_category(self, var, index):
        """
        Returns the category (a string, an integer or any hashable type) for each decision
        variable.

        If two variables have the same category, LearningSolver will use the same internal ML
        model to predict the values of both variables. By default, all variables belong to the
        "default" category, and therefore only one ML model is used for all variables.

        If the returned category is None, ML models will ignore the variable.
        """
        return "default"

    def find_violated_lazy_constraints(self, model):
        """
        Returns lazy constraint violations found for the current solution.

        After solving a model, LearningSolver will ask the instance to identify which lazy
        constraints are violated by the current solution. For each identified violation,
        LearningSolver will then call build_lazy_constraint, add the generated Pyomo
        constraint to the model, then re-solve the problem. The process repeats until no
        further lazy constraint violations are found.

        Each "violation" is simply a string, a tuple or any other hashable type which allows the
        instance to identify unambiguously which lazy constraint should be generated. In the
        Traveling Salesman Problem, for example, a subtour violation could be a frozen set
        containing the cities in the subtour.

        For a concrete example, see TravelingSalesmanInstance.
        """
        return []

    def build_lazy_constraint(self, model, violation):
        """
        Returns a Pyomo constraint which fixes a given violation.

        This method is typically called immediately after find_violated_lazy_constraints. The
        violation object provided to this method is exactly the same object returned earlier by
        find_violated_lazy_constraints. After some training, LearningSolver may decide to
        proactively build some lazy constraints at the beginning of the optimization process,
        before a solution is even available. In this case, build_lazy_constraint will be called
        without a corresponding call to find_violated_lazy_constraints.

        The implementation should not directly add the constraint to the model. The constraint
        will be added by LearningSolver after the method returns.

        For a concrete example, see TravelingSalesmanInstance.
        """
        pass

    def find_violated_user_cuts(self, model):
        return []

    def build_user_cut(self, model, violation):
        pass

    def load(self, filename):
        with gzip.GzipFile(filename, 'r') as f:
            data = json.loads(f.read().decode('utf-8'))
        self.__dict__ = data

    def dump(self, filename):
        data = json.dumps(self.__dict__, indent=2).encode('utf-8')
        with gzip.GzipFile(filename, 'w') as f:
            f.write(data)
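

# Illustrative sketch (not part of the original file): persisting instance
# data with the load/dump helpers above. `instance` is a hypothetical
# concrete subclass; every attribute in its __dict__, including those set by
# the solver (solution, lp_solution, ...), must be JSON-serializable.
#
#     instance.dump("instance.json.gz")   # writes gzip-compressed JSON
#     instance.load("instance.json.gz")   # restores __dict__ in place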
46
miplearn/log.py
Normal file
@@ -0,0 +1,46 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
import sys

# Use ANSI colors only when writing to a terminal.
if sys.stdout.isatty():
    log_colors = {
        "green": '\033[92m',
        "yellow": '\033[93m',
        "red": '\033[91m',
        "reset": '\033[0m',
    }
else:
    log_colors = {
        "green": "",
        "yellow": "",
        "red": "",
        "reset": "",
    }


class TimeFormatter:
    def __init__(self, start_time):
        self.start_time = start_time

    def format(self, record):
        if record.levelno >= logging.ERROR:
            color = log_colors["red"]
        elif record.levelno >= logging.WARNING:
            color = log_colors["yellow"]
        else:
            color = log_colors["green"]
        return "%s[%12.3f]%s %s" % (color,
                                    record.created - self.start_time,
                                    log_colors["reset"],
                                    record.getMessage())


def setup_logger(start_time):
    handler = logging.StreamHandler()
    handler.setFormatter(TimeFormatter(start_time))
    logging.getLogger().addHandler(handler)
    logging.getLogger("miplearn").setLevel(logging.INFO)
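

# Illustrative usage (not part of the original file): timestamps are measured
# relative to the `start_time` passed in, and colored when on a TTY.
if __name__ == "__main__":
    import time
    setup_logger(time.time())
    logging.getLogger("miplearn").info("elapsed time shown in brackets")
    # Output: [       0.000] elapsed time shown in brackets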
3
miplearn/problems/__init__.py
Normal file
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
275
miplearn/problems/knapsack.py
Normal file
@@ -0,0 +1,275 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import Instance
import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen


class ChallengeA:
    """
    - 250 variables, 10 constraints, fixed weights
    - w ~ U(0, 1000), jitter ~ U(0.95, 1.05)
    - K = 500, u ~ U(0., 1.)
    - alpha = 0.25
    """
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50):

        np.random.seed(seed)
        self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
                                          m=randint(low=10, high=11),
                                          w=uniform(loc=0.0, scale=1000.0),
                                          K=uniform(loc=500.0, scale=0.0),
                                          u=uniform(loc=0.0, scale=1.0),
                                          alpha=uniform(loc=0.25, scale=0.0),
                                          fix_w=True,
                                          w_jitter=uniform(loc=0.95, scale=0.1),
                                          )
        np.random.seed(seed + 1)
        self.training_instances = self.gen.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.gen.generate(n_test_instances)


class MultiKnapsackInstance(Instance):
    """Representation of the Multidimensional 0-1 Knapsack Problem.

    Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
    sum(prices[i] for i in S). If selected, each item i occupies weights[i,j] units of space in
    each knapsack j. Furthermore, each knapsack j has limited storage space, given by
    capacities[j].

    This implementation assigns a different category to each decision variable, and therefore
    trains one ML model per variable. It is only suitable when training and test instances have
    the same size and the items do not shuffle around.
    """

    def __init__(self,
                 prices,
                 capacities,
                 weights):
        assert isinstance(prices, np.ndarray)
        assert isinstance(capacities, np.ndarray)
        assert isinstance(weights, np.ndarray)
        assert len(weights.shape) == 2
        self.m, self.n = weights.shape
        assert prices.shape == (self.n,)
        assert capacities.shape == (self.m,)
        self.prices = prices
        self.capacities = capacities
        self.weights = weights

    def to_model(self):
        model = pe.ConcreteModel()
        model.x = pe.Var(range(self.n), domain=pe.Binary)
        model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
                                                        for j in range(self.n)),
                                 sense=pe.maximize)
        model.eq_capacity = pe.ConstraintList()
        for i in range(self.m):
            model.eq_capacity.add(sum(model.x[j] * self.weights[i, j]
                                      for j in range(self.n)) <= self.capacities[i])
        return model

    def get_instance_features(self):
        return np.hstack([
            np.mean(self.prices),
            self.capacities,
        ])

    def get_variable_features(self, var, index):
        return np.hstack([
            self.prices[index],
            self.weights[:, index],
        ])

    # def get_variable_category(self, var, index):
    #     return index


class MultiKnapsackGenerator:
    def __init__(self,
                 n=randint(low=100, high=101),
                 m=randint(low=30, high=31),
                 w=randint(low=0, high=1000),
                 K=randint(low=500, high=501),
                 u=uniform(loc=0.0, scale=1.0),
                 alpha=uniform(loc=0.25, scale=0.0),
                 fix_w=False,
                 w_jitter=uniform(loc=1.0, scale=0.0),
                 round=True,
                 ):
        """Initialize the problem generator.

        Instances have a random number of items (or variables) and a random number of knapsacks
        (or constraints), as specified by the provided probability distributions `n` and `m`,
        respectively. The weight of each item `i` on knapsack `j` is sampled independently from
        the provided distribution `w`. The capacity of knapsack `j` is set to:

            alpha_j * sum(w[i,j] for i in range(n)),

        where `alpha_j`, the tightness ratio, is sampled from the provided probability
        distribution `alpha`. To make the instances more challenging, the prices of the items
        are linearly correlated to their average weights. More specifically, the price of each
        item `i` is set to:

            sum(w[i,j]/m for j in range(m)) + K * u_i,

        where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are
        sampled from the provided probability distributions. Note that `K` is only sampled once
        for the entire instance.

        If fix_w=True is provided, then w[i,j] are kept the same in all generated instances.
        This also implies that n and m are kept fixed. Although the prices and capacities are
        derived from w[i,j], as long as u and K are not constants, the generated instances will
        still not be completely identical.

        If a probability distribution w_jitter is provided, then item weights will be set to
        w[i,j] * gamma[i,j], where gamma[i,j] is sampled from w_jitter. When combined with
        fix_w=True, this argument may be used to generate instances where the weight of each
        item is roughly the same, but not exactly identical, across all instances. The prices
        of the items and the capacities of the knapsacks will be calculated as above, but using
        these perturbed weights instead.

        By default, all generated prices, weights and capacities are rounded to the nearest
        integer. If `round=False` is provided, this rounding will be disabled.

        Parameters
        ----------
        n: rv_discrete
            Probability distribution for the number of items (or variables)
        m: rv_discrete
            Probability distribution for the number of knapsacks (or constraints)
        w: rv_continuous
            Probability distribution for the item weights
        K: rv_continuous
            Probability distribution for the profit correlation coefficient
        u: rv_continuous
            Probability distribution for the profit multiplier
        alpha: rv_continuous
            Probability distribution for the tightness ratio
        fix_w: boolean
            If true, weights are kept the same (modulo the noise from w_jitter) in all instances
        w_jitter: rv_continuous
            Probability distribution for random noise added to the weights
        round: boolean
            If true, all prices, weights and capacities are rounded to the nearest integer
        """
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(alpha, rv_frozen), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(w_jitter, rv_frozen), \
            "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
        self.K = K
        self.u = u
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.round = round

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
            self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
            self.fix_u = self.u.rvs(self.fix_n)
            self.fix_K = self.K.rvs()
        else:
            self.fix_n = None
            self.fix_m = None
            self.fix_w = None
            self.fix_u = None
            self.fix_K = None

    def generate(self, n_samples):
        def _sample():
            if self.fix_w is not None:
                n = self.fix_n
                m = self.fix_m
                w = self.fix_w
                u = self.fix_u
                K = self.fix_K
            else:
                n = self.n.rvs()
                m = self.m.rvs()
                w = np.array([self.w.rvs(n) for _ in range(m)])
                u = self.u.rvs(n)
                K = self.K.rvs()
            # Perturb weights, then derive prices and capacities from them.
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackInstance(p, b, w)
        return [_sample() for _ in range(n_samples)]
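

# Illustrative sketch (not part of the original file): a quick numerical
# check of the capacity formula from the docstring above, on a tiny instance.
# All parameter values here are arbitrary.
if __name__ == "__main__":
    gen = MultiKnapsackGenerator(n=randint(low=3, high=4),
                                 m=randint(low=2, high=3),
                                 w=uniform(loc=0.0, scale=100.0),
                                 K=uniform(loc=10.0, scale=0.0),
                                 u=uniform(loc=0.0, scale=1.0),
                                 alpha=uniform(loc=0.25, scale=0.0),
                                 round=False)
    inst = gen.generate(1)[0]
    # capacities[i] == alpha * weights[i, :].sum(), with alpha fixed at 0.25
    assert abs(inst.capacities[0] - 0.25 * inst.weights[0, :].sum()) < 1e-9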


class KnapsackInstance(Instance):
    """
    Simpler (one-dimensional) Knapsack Problem, used for testing.
    """
    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.prices[v] for v in items),
                                 sense=pe.maximize)
        model.eq_capacity = pe.Constraint(expr=sum(model.x[v] * self.weights[v]
                                                   for v in items) <= self.capacity)
        return model

    def get_instance_features(self):
        return np.array([
            self.capacity,
            np.average(self.weights),
        ])

    def get_variable_features(self, var, index):
        return np.array([
            self.weights[index],
            self.prices[index],
        ])


class GurobiKnapsackInstance(KnapsackInstance):
    """
    Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
    instead of Pyomo, used for testing.
    """
    def __init__(self, weights, prices, capacity):
        super().__init__(weights, prices, capacity)

    def to_model(self):
        import gurobipy as gp
        from gurobipy import GRB

        model = gp.Model("Knapsack")
        n = len(self.weights)
        x = model.addVars(n, vtype=GRB.BINARY, name="x")
        model.addConstr(gp.quicksum(x[i] * self.weights[i]
                                    for i in range(n)) <= self.capacity)
        model.setObjective(gp.quicksum(x[i] * self.prices[i]
                                       for i in range(n)), GRB.MAXIMIZE)
        return model
130
miplearn/problems/stab.py
Normal file
@@ -0,0 +1,130 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
import pyomo.environ as pe
import networkx as nx
from miplearn import Instance
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen


class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):

        np.random.seed(seed)
        self.generator = MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
                                                     n=randint(low=200, high=201),
                                                     p=uniform(loc=0.05, scale=0.0),
                                                     fix_graph=True)

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class MaxWeightStableSetGenerator:
    """Random instance generator for the Maximum-Weight Stable Set Problem.

    The generator has two modes of operation. When `fix_graph=True` is provided, one random
    Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled
    from the user-provided probability distributions `n` and `p`. To generate each instance, the
    generator independently samples each $w_v$ from the user-provided probability distribution
    `w`.

    When `fix_graph=False`, a new random graph is generated for each instance; the remaining
    parameters are sampled in the same way.
    """

    def __init__(self,
                 w=uniform(loc=10.0, scale=1.0),
                 n=randint(low=250, high=251),
                 p=uniform(loc=0.05, scale=0.0),
                 fix_graph=True):
        """Initialize the problem generator.

        Parameters
        ----------
        w: rv_continuous
            Probability distribution for vertex weights.
        n: rv_discrete
            Probability distribution for parameter $n$ in Erdős-Rényi model.
        p: rv_continuous
            Probability distribution for parameter $p$ in Erdős-Rényi model.
        """
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(p, rv_frozen), "p should be a SciPy probability distribution"
        self.w = w
        self.n = n
        self.p = p
        self.fix_graph = fix_graph
        self.graph = None
        if fix_graph:
            self.graph = self._generate_graph()

    def generate(self, n_samples):
        def _sample():
            if self.graph is not None:
                graph = self.graph
            else:
                graph = self._generate_graph()
            weights = self.w.rvs(graph.number_of_nodes())
            return MaxWeightStableSetInstance(graph, weights)
        return [_sample() for _ in range(n_samples)]

    def _generate_graph(self):
        return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())


class MaxWeightStableSetInstance(Instance):
    """An instance of the Maximum-Weight Stable Set Problem.

    Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
    set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
    a subset of vertices, no two of which are adjacent.

    This is one of Karp's 21 NP-complete problems.
    """

    def __init__(self, graph, weights):
        self.graph = graph
        self.weights = weights

    def to_model(self):
        nodes = list(self.graph.nodes)
        model = pe.ConcreteModel()
        model.x = pe.Var(nodes, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.weights[v] for v in nodes),
                                 sense=pe.maximize)
        # Clique inequalities: at most one vertex of each maximal clique can
        # belong to the stable set. These dominate the individual edge constraints.
        model.clique_eqs = pe.ConstraintList()
        for clique in nx.find_cliques(self.graph):
            model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
        return model

    def get_instance_features(self):
        return np.ones(0)

    def get_variable_features(self, var, index):
        # Pad with sentinel values so that every vertex, regardless of its
        # degree, produces a feature vector of the same length.
        neighbor_weights = [0] * 15
        neighbor_degrees = [100] * 15
        for n in self.graph.neighbors(index):
            neighbor_weights += [self.weights[n] / self.weights[index]]
            neighbor_degrees += [self.graph.degree(n) / self.graph.degree(index)]
        neighbor_weights.sort(reverse=True)
        neighbor_degrees.sort()
        features = []
        features += neighbor_weights[:5]
        features += neighbor_degrees[:5]
        features += [self.graph.degree(index)]
        return np.array(features)

    def get_variable_category(self, var, index):
        return "default"
4
miplearn/problems/tests/__init__.py
Normal file
@@ -0,0 +1,4 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
25
miplearn/problems/tests/test_knapsack.py
Normal file
@@ -0,0 +1,25 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.problems.knapsack import MultiKnapsackGenerator
from scipy.stats import uniform, randint
import numpy as np


def test_knapsack_generator():
    gen = MultiKnapsackGenerator(n=randint(low=100, high=101),
                                 m=randint(low=30, high=31),
                                 w=randint(low=0, high=1000),
                                 K=randint(low=500, high=501),
                                 u=uniform(loc=1.0, scale=1.0),
                                 alpha=uniform(loc=0.50, scale=0.0),
                                 )
    instances = gen.generate(100)
    w_sum = sum(instance.weights for instance in instances) / len(instances)
    p_sum = sum(instance.prices for instance in instances) / len(instances)
    b_sum = sum(instance.capacities for instance in instances) / len(instances)
    assert round(np.mean(w_sum), -1) == 500.
    # assert round(np.mean(p_sum), -1) == 1200.  # flaky
    assert round(np.mean(b_sum), -3) == 25000.
46
miplearn/problems/tests/test_stab.py
Normal file
@@ -0,0 +1,46 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import networkx as nx
import numpy as np
from miplearn import LearningSolver
from miplearn.problems.stab import MaxWeightStableSetInstance
from scipy.stats import uniform, randint


def test_stab():
    graph = nx.cycle_graph(5)
    weights = [1., 1., 1., 1., 1.]
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    solver.solve(instance)
    assert instance.lower_bound == 2.


def test_stab_generator_fixed_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=10, high=11),
                                      p=uniform(loc=0.05, scale=0.),
                                      fix_graph=True)
    instances = gen.generate(1_000)
    weights = np.array([instance.weights for instance in instances])
    weights_avg_actual = np.round(np.average(weights, axis=0))
    weights_avg_expected = [55.0] * 10
    assert list(weights_avg_actual) == weights_avg_expected


def test_stab_generator_random_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=30, high=41),
                                      p=uniform(loc=0.5, scale=0.),
                                      fix_graph=False)
    instances = gen.generate(1_000)
    n_nodes = [instance.graph.number_of_nodes() for instance in instances]
    n_edges = [instance.graph.number_of_edges() for instance in instances]
    assert np.round(np.mean(n_nodes)) == 35.
    assert np.round(np.mean(n_edges), -1) == 300.
74
miplearn/problems/tests/test_tsp.py
Normal file
@@ -0,0 +1,74 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver
from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance
import numpy as np
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint


def test_generator():
    instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                           y=uniform(loc=0.0, scale=1000.0),
                                           n=randint(low=100, high=101),
                                           gamma=uniform(loc=0.95, scale=0.1),
                                           fix_cities=True).generate(100)
    assert len(instances) == 100
    assert instances[0].n_cities == 100
    assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
    d = [instance.distances[0, 1] for instance in instances]
    assert np.std(d) > 0


def test_instance():
    n_cities = 4
    distances = np.array([
        [0., 1., 2., 1.],
        [1., 0., 1., 2.],
        [2., 1., 0., 1.],
        [1., 2., 1., 0.],
    ])
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi', 'cplex']:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
        assert x[0, 1] == 1.0
        assert x[0, 2] == 0.0
        assert x[0, 3] == 1.0
        assert x[1, 2] == 1.0
        assert x[1, 3] == 0.0
        assert x[2, 3] == 1.0
        assert instance.lower_bound == 4.0
        assert instance.upper_bound == 4.0


def test_subtour():
    n_cities = 6
    cities = np.array([
        [0., 0.],
        [1., 0.],
        [2., 0.],
        [3., 0.],
        [0., 1.],
        [3., 1.],
    ])
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi', 'cplex']:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        assert hasattr(instance, "found_violated_lazy_constraints")
        assert hasattr(instance, "found_violated_user_cuts")
        x = instance.solution["x"]
        assert x[0, 1] == 1.0
        assert x[0, 4] == 1.0
        assert x[1, 2] == 1.0
        assert x[2, 3] == 1.0
        assert x[3, 5] == 1.0
        assert x[4, 5] == 1.0
        solver.fit([instance])
        solver.solve(instance)
175
miplearn/problems/tsp.py
Normal file
@@ -0,0 +1,175 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
import pyomo.environ as pe
from miplearn import Instance
from scipy.stats import uniform, randint
from scipy.spatial.distance import pdist, squareform
from scipy.stats.distributions import rv_frozen
import networkx as nx


class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):

        np.random.seed(seed)
        self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                                    y=uniform(loc=0.0, scale=1000.0),
                                                    n=randint(low=350, high=351),
                                                    gamma=uniform(loc=0.95, scale=0.1),
                                                    fix_cities=True,
                                                    round=True,
                                                    )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class TravelingSalesmanGenerator:
    """Random generator for the Traveling Salesman Problem."""

    def __init__(self,
                 x=uniform(loc=0.0, scale=1000.0),
                 y=uniform(loc=0.0, scale=1000.0),
                 n=randint(low=100, high=101),
                 gamma=uniform(loc=1.0, scale=0.0),
                 fix_cities=True,
                 round=True,
                 ):
        """Initializes the problem generator.

        Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n), where n, x_i and y_i
        are sampled independently from the provided probability distributions `n`, `x` and `y`.
        For each (unordered) pair of cities (i,j), the distance d[i,j] between them is set to:

            d[i,j] = gamma[i,j] * sqrt((x_i - x_j)^2 + (y_i - y_j)^2)

        where gamma[i,j] is sampled from the provided probability distribution `gamma`.

        If fix_cities=True, the list of cities is kept the same for all generated instances.
        The gamma values, and therefore also the distances, are still different.

        By default, all distances d[i,j] are rounded to the nearest integer. If `round=False`
        is provided, this rounding will be disabled.

        Parameters
        ----------
        x: rv_continuous
            Probability distribution for the x-coordinate of each city.
        y: rv_continuous
            Probability distribution for the y-coordinate of each city.
        n: rv_discrete
            Probability distribution for the number of cities.
        gamma: rv_continuous
            Probability distribution for the multiplicative noise applied to each distance.
        fix_cities: bool
            If False, cities will be resampled for every generated instance. Otherwise, the
            list of cities will be computed once, in the constructor.
        round: bool
            If True, distances are rounded to the nearest integer.
        """
        assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
        assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution"
        self.x = x
        self.y = y
        self.n = n
        self.gamma = gamma
        self.round = round

        if fix_cities:
            self.fixed_n, self.fixed_cities = self._generate_cities()
        else:
            self.fixed_n = None
            self.fixed_cities = None

    def generate(self, n_samples):
        def _sample():
            if self.fixed_cities is not None:
                n, cities = self.fixed_n, self.fixed_cities
            else:
                n, cities = self._generate_cities()
            distances = squareform(pdist(cities)) * self.gamma.rvs(size=(n, n))
            # Symmetrize: keep the lower triangle and mirror it above the diagonal.
            distances = np.tril(distances) + np.triu(distances.T, 1)
            if self.round:
                distances = distances.round()
            return TravelingSalesmanInstance(n, distances)
        return [_sample() for _ in range(n_samples)]

    def _generate_cities(self):
        n = self.n.rvs()
        cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
        return n, cities


class TravelingSalesmanInstance(Instance):
    """An instance of the Traveling Salesman Problem.

    Given a list of cities and the distance between each pair of cities, the problem asks for
    the shortest route starting at the first city, visiting each other city exactly once, then
    returning to the first city. This problem is a generalization of the Hamiltonian path
    problem, one of Karp's 21 NP-complete problems.
    """

    def __init__(self, n_cities, distances):
        assert isinstance(distances, np.ndarray)
        assert distances.shape == (n_cities, n_cities)
        self.n_cities = n_cities
        self.distances = distances

    def to_model(self):
        model = pe.ConcreteModel()
        model.edges = edges = [(i, j)
                               for i in range(self.n_cities)
                               for j in range(i + 1, self.n_cities)]
        model.x = pe.Var(edges, domain=pe.Binary)
        model.obj = pe.Objective(expr=sum(model.x[i, j] * self.distances[i, j]
                                          for (i, j) in edges),
                                 sense=pe.minimize)
        model.eq_degree = pe.ConstraintList()
        model.eq_subtour = pe.ConstraintList()
        # Degree constraints: each city is incident to exactly two tour edges.
        for i in range(self.n_cities):
            model.eq_degree.add(sum(model.x[min(i, j), max(i, j)]
                                    for j in range(self.n_cities) if i != j) == 2)
        return model

    def get_instance_features(self):
        return np.array([1])

    def get_variable_features(self, var_name, index):
        return np.array([1])

    def get_variable_category(self, var_name, index):
        return index

    def find_violated_lazy_constraints(self, model):
        # Any connected component that does not span all cities is a subtour.
        selected_edges = [e for e in model.edges if model.x[e].value > 0.5]
        graph = nx.Graph()
        graph.add_edges_from(selected_edges)
        components = [frozenset(c) for c in list(nx.connected_components(graph))]
        violations = []
        for c in components:
            if len(c) < self.n_cities:
                violations += [c]
        return violations

    def build_lazy_constraint(self, model, component):
        # Cutset inequality: at least two tour edges must cross the component.
        cut_edges = [e for e in model.edges
                     if (e[0] in component and e[1] not in component) or
                        (e[0] not in component and e[1] in component)]
        return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)

    def find_violated_user_cuts(self, model):
        return self.find_violated_lazy_constraints(model)

    def build_user_cut(self, model, violation):
        return self.build_lazy_constraint(model, violation)
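

# Illustrative sketch (not part of the original file): the cutting-plane loop
# that the solver drives with the two methods above. `instance` and `model`
# are hypothetical, with `model` produced by instance.to_model() and solved.
#
#     violations = instance.find_violated_lazy_constraints(model)
#     while len(violations) > 0:
#         for component in violations:
#             # Returns (and, in this Pyomo implementation, registers in
#             # model.eq_subtour) a cutset inequality cutting off the subtour.
#             instance.build_lazy_constraint(model, component)
#         # ... re-solve the model, then search for violations again ...
#         violations = instance.find_violated_lazy_constraints(model)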
32
miplearn/solvers/__init__.py
Normal file
@@ -0,0 +1,32 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
import sys

logger = logging.getLogger(__name__)


class RedirectOutput:
    def __init__(self, streams):
        self.streams = streams

    def write(self, data):
        for stream in self.streams:
            stream.write(data)

    def flush(self):
        for stream in self.streams:
            stream.flush()

    def __enter__(self):
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        sys.stdout = self
        sys.stderr = self
        return self

    def __exit__(self, _type, _value, _traceback):
        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr
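

# Illustrative usage (not part of the original file): capture all output in a
# buffer while optionally echoing it to the terminal, mirroring how
# GurobiSolver.solve uses this class.
if __name__ == "__main__":
    from io import StringIO
    streams = [StringIO(), sys.stdout]
    with RedirectOutput(streams):
        print("this line reaches both the buffer and the terminal")
    log = streams[0].getvalue()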
209
miplearn/solvers/guroby.py
Normal file
@@ -0,0 +1,209 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import re
import sys
import logging
from io import StringIO

from . import RedirectOutput
from .internal import InternalSolver

logger = logging.getLogger(__name__)


class GurobiSolver(InternalSolver):
    def __init__(self, params=None):
        if params is None:
            params = {
                "LazyConstraints": 1,
                "PreCrush": 1,
            }
        from gurobipy import GRB
        self.GRB = GRB
        self.instance = None
        self.model = None
        self.params = params
        self._all_vars = None
        self._bin_vars = None
        self._varname_to_var = None

    def set_instance(self, instance, model=None):
        if model is None:
            model = instance.to_model()
        self.instance = instance
        self.model = model
        self.model.update()
        self._update_vars()

    def _update_vars(self):
        self._all_vars = {}
        self._bin_vars = {}
        for var in self.model.getVars():
            # Parse names such as "x[3,7]" into ("x", (3, 7)).
            m = re.search(r"([^[]*)\[(.*)\]", var.varName)
            if m is None:
                name = var.varName
                idx = [0]
            else:
                name = m.group(1)
                idx = tuple(int(k) if k.isdecimal() else k
                            for k in m.group(2).split(","))
            if len(idx) == 1:
                idx = idx[0]
            if name not in self._all_vars:
                self._all_vars[name] = {}
            self._all_vars[name][idx] = var
            if var.vtype != 'C':
                if name not in self._bin_vars:
                    self._bin_vars[name] = {}
                self._bin_vars[name][idx] = var

    def _apply_params(self):
        for (name, value) in self.params.items():
            self.model.setParam(name, value)

    def solve_lp(self, tee=False):
        self._apply_params()
        streams = [StringIO()]
        if tee:
            streams += [sys.stdout]
        # Relax the binary variables, solve, then restore their types.
        for (varname, vardict) in self._bin_vars.items():
            for (idx, var) in vardict.items():
                var.vtype = self.GRB.CONTINUOUS
                var.lb = 0.0
                var.ub = 1.0
        with RedirectOutput(streams):
            self.model.optimize()
        for (varname, vardict) in self._bin_vars.items():
            for (idx, var) in vardict.items():
                var.vtype = self.GRB.BINARY
        log = streams[0].getvalue()
        return {
            "Optimal value": self.model.objVal,
            "Log": log
        }

    def solve(self, tee=False):
        self.instance.found_violated_lazy_constraints = []
        self.instance.found_violated_user_cuts = []
        streams = [StringIO()]
        if tee:
            streams += [sys.stdout]

        def cb(cb_model, cb_where):
            try:
                # User cuts
                if cb_where == self.GRB.Callback.MIPNODE:
                    logger.debug("Finding violated cutting planes...")
                    violations = self.instance.find_violated_user_cuts(cb_model)
                    self.instance.found_violated_user_cuts += violations
                    logger.debug("    %d found" % len(violations))
                    for v in violations:
                        cut = self.instance.build_user_cut(cb_model, v)
                        cb_model.cbCut(cut)

                # Lazy constraints
                if cb_where == self.GRB.Callback.MIPSOL:
                    logger.debug("Finding violated lazy constraints...")
                    violations = self.instance.find_violated_lazy_constraints(cb_model)
                    self.instance.found_violated_lazy_constraints += violations
                    logger.debug("    %d found" % len(violations))
                    for v in violations:
                        cut = self.instance.build_lazy_constraint(cb_model, v)
                        cb_model.cbLazy(cut)
            except Exception as e:
                logger.error(e)

        with RedirectOutput(streams):
            self.model.optimize(cb)
        log = streams[0].getvalue()
        return {
            "Lower bound": self.model.objVal,
            "Upper bound": self.model.objBound,
            "Wallclock time": self.model.runtime,
            "Nodes": int(self.model.nodeCount),
            "Sense": ("min" if self.model.modelSense == 1 else "max"),
            "Log": log,
            "Warm start value": self._extract_warm_start_value(log),
        }

    def get_solution(self):
        solution = {}
        for (varname, vardict) in self._all_vars.items():
            solution[varname] = {}
            for (idx, var) in vardict.items():
                solution[varname][idx] = var.x
        return solution

    def get_variables(self):
        variables = {}
        for (varname, vardict) in self._all_vars.items():
            variables[varname] = []
            for (idx, var) in vardict.items():
                variables[varname] += [idx]
        return variables

    def add_constraint(self, constraint):
        self.model.addConstr(constraint)

    def set_warm_start(self, solution):
        count_fixed, count_total = 0, 0
        for (varname, vardict) in solution.items():
            for (idx, value) in vardict.items():
                count_total += 1
                if value is not None:
                    count_fixed += 1
                    self._all_vars[varname][idx].start = value
        logger.info("Setting start values for %d variables (out of %d)" %
                    (count_fixed, count_total))

    def clear_warm_start(self):
        for (varname, vardict) in self._all_vars.items():
            for (idx, var) in vardict.items():
                var.start = self.GRB.UNDEFINED

    def fix(self, solution):
        for (varname, vardict) in solution.items():
            for (idx, value) in vardict.items():
                if value is None:
                    continue
                var = self._all_vars[varname][idx]
                var.vtype = self.GRB.CONTINUOUS
                var.lb = value
                var.ub = value

    def set_branching_priorities(self, priorities):
        logger.warning("set_branching_priorities not implemented")

    def set_threads(self, threads):
        self.params["Threads"] = threads

    def set_time_limit(self, time_limit):
        self.params["TimeLimit"] = time_limit

    def set_node_limit(self, node_limit):
        self.params["NodeLimit"] = node_limit

    def set_gap_tolerance(self, gap_tolerance):
        self.params["MIPGap"] = gap_tolerance

    def _extract_warm_start_value(self, log):
        ws = self.__extract(log, "MIP start with objective ([0-9.e+-]*)")
        if ws is not None:
            ws = float(ws)
        return ws

    def __extract(self, log, regexp, default=None):
        value = default
        for line in log.splitlines():
            matches = re.findall(regexp, line)
            if len(matches) == 0:
                continue
            value = matches[0]
        return value

    def __getstate__(self):
        return self.params

    def __setstate__(self, state):
        self.params = state
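

# Illustrative sketch (not part of the original file): how _update_vars above
# parses Gurobi variable names into (name, index) pairs.
if __name__ == "__main__":
    m = re.search(r"([^[]*)\[(.*)\]", "x[3,7]")
    name = m.group(1)                            # "x"
    idx = tuple(int(k) if k.isdecimal() else k
                for k in m.group(2).split(","))  # (3, 7)
    print(name, idx)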
164
miplearn/solvers/internal.py
Normal file
@@ -0,0 +1,164 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from abc import ABC, abstractmethod

logger = logging.getLogger(__name__)


class InternalSolver(ABC):
    """
    Abstract class representing the MIP solver used internally by LearningSolver.
    """

    @abstractmethod
    def solve_lp(self, tee=False):
        """
        Solves the LP relaxation of the currently loaded instance. After this
        method finishes, the solution can be retrieved by calling `get_solution`.

        Parameters
        ----------
        tee: bool
            If true, prints the solver log to the screen.

        Returns
        -------
        dict
            A dictionary of solver statistics containing the following keys:
            "Optimal value".
        """
        pass

    @abstractmethod
    def get_solution(self):
        """
        Returns the current solution found by the solver.

        If called after `solve`, returns the best primal solution found during
        the search. If called after `solve_lp`, returns the optimal solution
        to the LP relaxation.

        The solution is a dictionary `sol`, where the optimal value of `var[idx]`
        is given by `sol[var][idx]`.
        """
        pass

    @abstractmethod
    def set_warm_start(self, solution):
        """
        Sets the warm start to be used by the solver.

        The solution should be a dictionary following the same format as the
        one produced by `get_solution`. Only one warm start is supported.
        Calling this function when a warm start already exists will
        remove the previous warm start.
        """
        pass

    @abstractmethod
    def clear_warm_start(self):
        """
        Removes any existing warm start from the solver.
        """
        pass

    @abstractmethod
    def set_instance(self, instance, model=None):
        """
        Loads the given instance into the solver.

        Parameters
        ----------
        instance: miplearn.Instance
            The instance to be loaded.
        model:
            The concrete optimization model corresponding to this instance
            (e.g. JuMP.Model or pyomo.core.ConcreteModel). If not provided,
            it will be generated by calling `instance.to_model()`.
        """
        pass

    @abstractmethod
    def fix(self, solution):
        """
        Fixes the values of a subset of decision variables.

        The values should be provided in the dictionary format generated by
        `get_solution`. Missing values in the solution indicate variables
        that should be left free.
        """
        pass

    @abstractmethod
    def set_branching_priorities(self, priorities):
        """
        Sets the branching priorities for the given decision variables.

        When the MIP solver needs to decide on which variable to branch,
        variables with higher priority are picked first, given that they are
        fractional. Ties are broken arbitrarily. By default, all variables
        have priority zero.

        The priorities should be provided in the dictionary format generated by
        `get_solution`. Missing values indicate variables whose priorities
        should not be modified.
        """
        pass

    @abstractmethod
    def add_constraint(self, constraint):
        """
        Adds a single constraint to the model.
        """
        pass

    @abstractmethod
    def solve(self, tee=False):
        """
        Solves the currently loaded instance. After this method finishes,
        the best solution found can be retrieved by calling `get_solution`.

        Parameters
        ----------
        tee: bool
            If true, prints the solver log to the screen.

        Returns
        -------
        dict
            A dictionary of solver statistics containing the following keys:
            "Lower bound", "Upper bound", "Wallclock time", "Nodes", "Sense",
            "Log" and "Warm start value".
        """
        pass

    @abstractmethod
    def set_threads(self, threads):
        pass

    @abstractmethod
    def set_time_limit(self, time_limit):
        pass

    @abstractmethod
    def set_node_limit(self, node_limit):
        pass

    @abstractmethod
    def set_gap_tolerance(self, gap_tolerance):
        pass

    @abstractmethod
    def get_variables(self):
        pass

    def get_empty_solution(self):
        # Returns a solution dictionary, in the format of `get_solution`,
        # with every variable set to zero.
        solution = {}
        for (var, indices) in self.get_variables().items():
            solution[var] = {}
            for idx in indices:
                solution[var][idx] = 0.0
        return solution
227
miplearn/solvers/learning.py
Normal file
@@ -0,0 +1,227 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from copy import deepcopy
from typing import Optional, List

from p_tqdm import p_map

from .. import (ObjectiveValueComponent,
                PrimalSolutionComponent,
                LazyConstraintsComponent,
                UserCutsComponent)
from .pyomo.cplex import CplexPyomoSolver
from .pyomo.gurobi import GurobiPyomoSolver

logger = logging.getLogger(__name__)


# Global memory for multiprocessing
SOLVER = [None]     # type: List[Optional[LearningSolver]]
INSTANCES = [None]  # type: List[Optional[dict]]


def _parallel_solve(instance_idx):
    solver = deepcopy(SOLVER[0])
    instance = INSTANCES[0][instance_idx]
    results = solver.solve(instance)
    return {
        "Results": results,
        "Solution": instance.solution,
        "LP solution": instance.lp_solution,
        "Violated lazy constraints": instance.found_violated_lazy_constraints,
        "Violated user cuts": instance.found_violated_user_cuts,
    }


class LearningSolver:
    """
    Mixed-Integer Linear Programming (MIP) solver that extracts information
    from previous runs, using Machine Learning methods, to accelerate the
    solution of new (yet unseen) instances.
    """

    def __init__(self,
                 components=None,
                 gap_tolerance=None,
                 mode="exact",
                 solver="gurobi",
                 threads=None,
                 time_limit=None,
                 node_limit=None):
        self.components = {}
        self.mode = mode
        self.internal_solver = None
        self.internal_solver_factory = solver
        self.threads = threads
        self.time_limit = time_limit
        self.gap_tolerance = gap_tolerance
        self.tee = False
        self.node_limit = node_limit

        if components is not None:
            for comp in components:
                self.add(comp)
        else:
            self.add(ObjectiveValueComponent())
            self.add(PrimalSolutionComponent())
            self.add(LazyConstraintsComponent())
            self.add(UserCutsComponent())

        assert self.mode in ["exact", "heuristic"]
        for component in self.components.values():
            component.mode = self.mode

    def _create_internal_solver(self):
        logger.debug("Initializing %s" % self.internal_solver_factory)
        if self.internal_solver_factory == "cplex":
            solver = CplexPyomoSolver()
        elif self.internal_solver_factory == "gurobi":
            solver = GurobiPyomoSolver()
        elif callable(self.internal_solver_factory):
            solver = self.internal_solver_factory()
        else:
            solver = self.internal_solver_factory
        if self.threads is not None:
            solver.set_threads(self.threads)
        if self.time_limit is not None:
            solver.set_time_limit(self.time_limit)
        if self.gap_tolerance is not None:
            solver.set_gap_tolerance(self.gap_tolerance)
        if self.node_limit is not None:
            solver.set_node_limit(self.node_limit)
        return solver

    def solve(self,
              instance,
              model=None,
              tee=False,
              relaxation_only=False,
              solve_lp_first=True):
        """
        Solves the given instance. If trained machine-learning models are
        available, they will be used to accelerate the solution process.

        This method modifies the instance object. Specifically, the following
        properties are set:
            - instance.lp_solution
            - instance.lp_value
            - instance.lower_bound
            - instance.upper_bound
            - instance.solution
            - instance.found_violated_lazy_constraints
            - instance.solver_log

        Additional solver components may set additional properties. Please
        see their documentation for more details.

        If `solve_lp_first` is False, the properties lp_solution and lp_value
        will be set to dummy values.

        Parameters
        ----------
        instance: miplearn.Instance
            The instance to be solved.
        model: pyomo.core.ConcreteModel
            The corresponding Pyomo model. If not provided, it will be created.
        tee: bool
            If true, prints solver log to screen.
        relaxation_only: bool
            If true, solve only the root LP relaxation.
        solve_lp_first: bool
            If true, solve the LP relaxation first, then solve the original
            MILP. This option should be activated if the LP relaxation is not
            very expensive to solve and if it provides good hints for the
            integer solution.

        Returns
        -------
        dict
            A dictionary of solver statistics containing at least the following
            keys: "Lower bound", "Upper bound", "Wallclock time", "Nodes",
            "Sense", "Log", "Warm start value" and "LP value".

            Additional components may generate additional keys. For example,
            ObjectiveValueComponent adds the keys "Predicted LB" and
            "Predicted UB". See the documentation of each component for more
            details.
        """

        if model is None:
            model = instance.to_model()

        self.tee = tee
        self.internal_solver = self._create_internal_solver()
        self.internal_solver.set_instance(instance, model)

        if solve_lp_first:
            logger.debug("Solving LP relaxation...")
            results = self.internal_solver.solve_lp(tee=tee)
            instance.lp_solution = self.internal_solver.get_solution()
            instance.lp_value = results["Optimal value"]
        else:
            instance.lp_solution = self.internal_solver.get_empty_solution()
            instance.lp_value = 0.0

        logger.debug("Running before_solve callbacks...")
        for component in self.components.values():
            component.before_solve(self, instance, model)

        # Note: relaxation_only requires solve_lp_first=True; otherwise no
        # relaxation results exist to be returned here.
        if relaxation_only:
            return results

        results = self.internal_solver.solve(tee=tee)
        results["LP value"] = instance.lp_value

        # Read MIP solution and bounds
        instance.lower_bound = results["Lower bound"]
        instance.upper_bound = results["Upper bound"]
        instance.solver_log = results["Log"]
        instance.solution = self.internal_solver.get_solution()

        logger.debug("Calling after_solve callbacks...")
        for component in self.components.values():
            component.after_solve(self, instance, model, results)

        return results
|
||||
|
||||
def parallel_solve(self,
|
||||
instances,
|
||||
n_jobs=4,
|
||||
label="Solve"):
|
||||
|
||||
self.internal_solver = None
|
||||
SOLVER[0] = self
|
||||
INSTANCES[0] = instances
|
||||
p_map_results = p_map(_parallel_solve,
|
||||
list(range(len(instances))),
|
||||
num_cpus=n_jobs,
|
||||
desc=label)
|
||||
|
||||
results = [p["Results"] for p in p_map_results]
|
||||
for (idx, r) in enumerate(p_map_results):
|
||||
instances[idx].solution = r["Solution"]
|
||||
instances[idx].lp_solution = r["LP solution"]
|
||||
instances[idx].lp_value = r["Results"]["LP value"]
|
||||
instances[idx].lower_bound = r["Results"]["Lower bound"]
|
||||
instances[idx].upper_bound = r["Results"]["Upper bound"]
|
||||
instances[idx].found_violated_lazy_constraints = r["Violated lazy constraints"]
|
||||
instances[idx].found_violated_user_cuts = r["Violated user cuts"]
|
||||
instances[idx].solver_log = r["Results"]["Log"]
|
||||
|
||||
return results
|
||||
|
||||
def fit(self, training_instances):
|
||||
if len(training_instances) == 0:
|
||||
return
|
||||
for component in self.components.values():
|
||||
component.fit(training_instances)
|
||||
|
||||
def add(self, component):
|
||||
name = component.__class__.__name__
|
||||
self.components[name] = component
|
||||
|
||||
def __getstate__(self):
|
||||
self.internal_solver = None
|
||||
return self.__dict__
|
||||
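
A minimal usage sketch for the class above, assuming the KnapsackInstance problem class from miplearn.problems.knapsack (the same class used by the tests later in this commit); the weights, prices and parameter values are illustrative only:

# Usage sketch (not part of the commit). Assumes KnapsackInstance from
# miplearn.problems.knapsack, as exercised by the tests below.
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance

training_instances = [
    KnapsackInstance(weights=[23., 26., 20., 18.],
                     prices=[505., 352., 458., 220.],
                     capacity=67.)
    for _ in range(5)
]

solver = LearningSolver(solver="gurobi", time_limit=300)
solver.parallel_solve(training_instances, n_jobs=2)  # collect training data
solver.fit(training_instances)                       # train all ML components

test_instance = KnapsackInstance(weights=[25., 30., 22., 18.],
                                 prices=[500., 365., 420., 150.],
                                 capacity=70.)
results = solver.solve(test_instance)
print(results["Lower bound"], results["Upper bound"], results["Nodes"])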
3
miplearn/solvers/pyomo/__init__.py
Normal file
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
223
miplearn/solvers/pyomo/base.py
Normal file
@@ -0,0 +1,223 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import re
import sys
import logging
import pyomo
from abc import abstractmethod
from io import StringIO
from pyomo import environ as pe
from pyomo.core import Var

from .. import RedirectOutput
from ..internal import InternalSolver
from ...instance import Instance

logger = logging.getLogger(__name__)


class BasePyomoSolver(InternalSolver):
    """
    Base class for all Pyomo solvers.
    """

    def __init__(self):
        self.instance = None
        self.model = None
        self._all_vars = None
        self._bin_vars = None
        self._is_warm_start_available = False
        self._pyomo_solver = None
        self._obj_sense = None
        self._varname_to_var = {}

    def solve_lp(self, tee=False):
        for var in self._bin_vars:
            lb, ub = var.bounds
            var.setlb(lb)
            var.setub(ub)
            var.domain = pyomo.core.base.set_types.Reals
            self._pyomo_solver.update_var(var)
        results = self._pyomo_solver.solve(tee=tee)
        for var in self._bin_vars:
            var.domain = pyomo.core.base.set_types.Binary
            self._pyomo_solver.update_var(var)
        return {
            "Optimal value": results["Problem"][0]["Lower bound"],
        }

    def get_solution(self):
        solution = {}
        for var in self.model.component_objects(Var):
            solution[str(var)] = {}
            for index in var:
                solution[str(var)][index] = var[index].value
        return solution

    def get_variables(self):
        variables = {}
        for var in self.model.component_objects(Var):
            variables[str(var)] = []
            for index in var:
                variables[str(var)] += [index]
        return variables

    def set_warm_start(self, solution):
        self.clear_warm_start()
        count_total, count_fixed = 0, 0
        for var_name in solution:
            var = self._varname_to_var[var_name]
            for index in solution[var_name]:
                count_total += 1
                var[index].value = solution[var_name][index]
                if solution[var_name][index] is not None:
                    count_fixed += 1
        if count_fixed > 0:
            self._is_warm_start_available = True
        logger.info("Setting start values for %d variables (out of %d)" %
                    (count_fixed, count_total))

    def clear_warm_start(self):
        for var in self._all_vars:
            if not var.fixed:
                var.value = None
        self._is_warm_start_available = False

    def set_instance(self, instance, model=None):
        if model is None:
            model = instance.to_model()
        assert isinstance(instance, Instance)
        assert isinstance(model, pe.ConcreteModel)
        self.instance = instance
        self.model = model
        self._pyomo_solver.set_instance(model)

        # Update objective sense
        self._obj_sense = "max"
        if self._pyomo_solver._objective.sense == pyomo.core.kernel.objective.minimize:
            self._obj_sense = "min"

        # Update variables
        self._all_vars = []
        self._bin_vars = []
        self._varname_to_var = {}
        for var in model.component_objects(Var):
            self._varname_to_var[var.name] = var
            for idx in var:
                self._all_vars += [var[idx]]
                if var[idx].domain == pyomo.core.base.set_types.Binary:
                    self._bin_vars += [var[idx]]

    def fix(self, solution):
        count_total, count_fixed = 0, 0
        for varname in solution:
            for index in solution[varname]:
                var = self._varname_to_var[varname]
                count_total += 1
                if solution[varname][index] is None:
                    continue
                count_fixed += 1
                var[index].fix(solution[varname][index])
                self._pyomo_solver.update_var(var[index])
        logger.info("Fixing values for %d variables (out of %d)" %
                    (count_fixed, count_total))

    def add_constraint(self, constraint):
        self._pyomo_solver.add_constraint(constraint)

    def solve(self, tee=False):
        total_wallclock_time = 0
        streams = [StringIO()]
        if tee:
            streams += [sys.stdout]
        self.instance.found_violated_lazy_constraints = []
        self.instance.found_violated_user_cuts = []
        while True:
            logger.debug("Solving MIP...")
            with RedirectOutput(streams):
                results = self._pyomo_solver.solve(tee=True,
                                                   warmstart=self._is_warm_start_available)
            total_wallclock_time += results["Solver"][0]["Wallclock time"]
            logger.debug("Finding violated constraints...")
            violations = self.instance.find_violated_lazy_constraints(self.model)
            if len(violations) == 0:
                break
            self.instance.found_violated_lazy_constraints += violations
            logger.debug("    %d violations found" % len(violations))
            for v in violations:
                cut = self.instance.build_lazy_constraint(self.model, v)
                self.add_constraint(cut)

        log = streams[0].getvalue()
        return {
            "Lower bound": results["Problem"][0]["Lower bound"],
            "Upper bound": results["Problem"][0]["Upper bound"],
            "Wallclock time": total_wallclock_time,
            "Nodes": self._extract_node_count(log),
            "Sense": self._obj_sense,
            "Log": log,
            "Warm start value": self._extract_warm_start_value(log),
        }

    @staticmethod
    def __extract(log, regexp, default=None):
        value = default
        for line in log.splitlines():
            matches = re.findall(regexp, line)
            if len(matches) == 0:
                continue
            value = matches[0]
        return value

    def _extract_warm_start_value(self, log):
        value = self.__extract(log, self._get_warm_start_regexp())
        if value is not None:
            value = float(value)
        return value

    def _extract_node_count(self, log):
        return int(self.__extract(log,
                                  self._get_node_count_regexp(),
                                  default=1))

    def set_threads(self, threads):
        key = self._get_threads_option_name()
        self._pyomo_solver.options[key] = threads

    def set_time_limit(self, time_limit):
        key = self._get_time_limit_option_name()
        self._pyomo_solver.options[key] = time_limit

    def set_node_limit(self, node_limit):
        key = self._get_node_limit_option_name()
        self._pyomo_solver.options[key] = node_limit

    def set_gap_tolerance(self, gap_tolerance):
        key = self._get_gap_tolerance_option_name()
        self._pyomo_solver.options[key] = gap_tolerance

    @abstractmethod
    def _get_warm_start_regexp(self):
        pass

    @abstractmethod
    def _get_node_count_regexp(self):
        pass

    @abstractmethod
    def _get_threads_option_name(self):
        pass

    @abstractmethod
    def _get_time_limit_option_name(self):
        pass

    @abstractmethod
    def _get_node_limit_option_name(self):
        pass

    @abstractmethod
    def _get_gap_tolerance_option_name(self):
        pass
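
The six abstract hooks above are all a concrete subclass must supply: two regular expressions matched line by line against the solver log, and four option-key names. A hypothetical sketch; the class name, solver factory string, regexps and option keys below are placeholders, not a real solver interface:

# Hypothetical sketch of a new BasePyomoSolver subclass. Every literal
# below is a placeholder; see CplexPyomoSolver/GurobiPyomoSolver for
# real values.
from pyomo import environ as pe
from miplearn.solvers.pyomo.base import BasePyomoSolver

class MySolver(BasePyomoSolver):
    def __init__(self):
        super().__init__()
        # Placeholder persistent-solver plugin name
        self._pyomo_solver = pe.SolverFactory('mysolver_persistent')

    def _get_warm_start_regexp(self):
        return "MIP start .* objective ([0-9.e+-]*)"  # matched against the log

    def _get_node_count_regexp(self):
        return "^ *([0-9]+) nodes"                    # placeholder pattern

    def _get_threads_option_name(self):
        return "threads"

    def _get_time_limit_option_name(self):
        return "timelimit"

    def _get_node_limit_option_name(self):
        return "nodelimit"

    def _get_gap_tolerance_option_name(self):
        return "mipgap"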
49
miplearn/solvers/pyomo/cplex.py
Normal file
@@ -0,0 +1,49 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from pyomo import environ as pe
from scipy.stats import randint

from .base import BasePyomoSolver


class CplexPyomoSolver(BasePyomoSolver):
    def __init__(self, options=None):
        """
        Creates a new CPLEX solver, accessed through Pyomo.

        Parameters
        ----------
        options: dict
            Dictionary of options to pass to the Pyomo solver. For example,
            {"mip_display": 5} to increase the log verbosity.
        """
        super().__init__()
        self._pyomo_solver = pe.SolverFactory('cplex_persistent')
        self._pyomo_solver.options["randomseed"] = randint(low=0, high=1000).rvs()
        self._pyomo_solver.options["mip_display"] = 4
        if options is not None:
            for (key, value) in options.items():
                self._pyomo_solver.options[key] = value

    def _get_warm_start_regexp(self):
        return "MIP start .* with objective ([0-9.e+-]*)\\."

    def _get_node_count_regexp(self):
        return "^[ *] *([0-9]+)"

    def _get_threads_option_name(self):
        return "threads"

    def _get_time_limit_option_name(self):
        return "timelimit"

    def _get_node_limit_option_name(self):
        return "mip_limits_nodes"

    def _get_gap_tolerance_option_name(self):
        return "mip_tolerances_mipgap"

    def set_branching_priorities(self, priorities):
        raise NotImplementedError
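
Since LearningSolver's `solver` argument also accepts a callable factory (see `_create_internal_solver` above), a pre-configured CPLEX solver can be plugged in directly. A small sketch; the option and limit values are illustrative:

# Sketch: passing a configured CplexPyomoSolver to LearningSolver via a
# callable factory. Option values below are illustrative.
from miplearn import LearningSolver
from miplearn.solvers.pyomo.cplex import CplexPyomoSolver

solver = LearningSolver(
    solver=lambda: CplexPyomoSolver(options={"mip_display": 5}),
    time_limit=600,
)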
129
miplearn/solvers/pyomo/gurobi.py
Normal file
@@ -0,0 +1,129 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import sys
import logging
from io import StringIO
from pyomo import environ as pe
from scipy.stats import randint

from .base import BasePyomoSolver
from .. import RedirectOutput

logger = logging.getLogger(__name__)


class GurobiPyomoSolver(BasePyomoSolver):
    def __init__(self,
                 use_lazy_callbacks=True,
                 options=None):
        """
        Creates a new Gurobi solver, accessed through Pyomo.

        Parameters
        ----------
        use_lazy_callbacks: bool
            If true, lazy constraints will be enforced via lazy callbacks.
            Otherwise, they will be enforced via a simple solve-check loop.
        options: dict
            Dictionary of options to pass to the Pyomo solver. For example,
            {"Threads": 4} to set the number of threads.
        """
        super().__init__()
        self._use_lazy_callbacks = use_lazy_callbacks
        self._pyomo_solver = pe.SolverFactory('gurobi_persistent')
        self._pyomo_solver.options["Seed"] = randint(low=0, high=1000).rvs()
        if options is not None:
            for (key, value) in options.items():
                self._pyomo_solver.options[key] = value

    def solve(self, tee=False):
        if self._use_lazy_callbacks:
            return self._solve_with_callbacks(tee)
        else:
            return super().solve(tee)

    def _solve_with_callbacks(self, tee):
        from gurobipy import GRB

        def cb(cb_model, cb_opt, cb_where):
            try:
                # User cuts
                if cb_where == GRB.Callback.MIPNODE:
                    logger.debug("Finding violated cutting planes...")
                    cb_opt.cbGetNodeRel(self._all_vars)
                    violations = self.instance.find_violated_user_cuts(cb_model)
                    self.instance.found_violated_user_cuts += violations
                    logger.debug("    %d found" % len(violations))
                    for v in violations:
                        cut = self.instance.build_user_cut(cb_model, v)
                        cb_opt.cbCut(cut)

                # Lazy constraints
                if cb_where == GRB.Callback.MIPSOL:
                    cb_opt.cbGetSolution(self._all_vars)
                    logger.debug("Finding violated lazy constraints...")
                    violations = self.instance.find_violated_lazy_constraints(cb_model)
                    self.instance.found_violated_lazy_constraints += violations
                    logger.debug("    %d found" % len(violations))
                    for v in violations:
                        cut = self.instance.build_lazy_constraint(cb_model, v)
                        cb_opt.cbLazy(cut)
            except Exception as e:
                logger.error(e)

        self._pyomo_solver.options["LazyConstraints"] = 1
        self._pyomo_solver.options["PreCrush"] = 1
        self._pyomo_solver.set_callback(cb)

        self.instance.found_violated_lazy_constraints = []
        self.instance.found_violated_user_cuts = []

        streams = [StringIO()]
        if tee:
            streams += [sys.stdout]
        with RedirectOutput(streams):
            results = self._pyomo_solver.solve(tee=True,
                                               warmstart=self._is_warm_start_available)

        self._pyomo_solver.set_callback(None)
        log = streams[0].getvalue()
        return {
            "Lower bound": results["Problem"][0]["Lower bound"],
            "Upper bound": results["Problem"][0]["Upper bound"],
            "Wallclock time": results["Solver"][0]["Wallclock time"],
            "Nodes": self._extract_node_count(log),
            "Sense": self._obj_sense,
            "Log": log,
            "Warm start value": self._extract_warm_start_value(log),
        }

    def _extract_node_count(self, log):
        return max(1, int(self._pyomo_solver._solver_model.getAttr("NodeCount")))

    def _get_warm_start_regexp(self):
        return "MIP start with objective ([0-9.e+-]*)"

    def _get_node_count_regexp(self):
        return None

    def _get_threads_option_name(self):
        return "Threads"

    def _get_time_limit_option_name(self):
        return "TimeLimit"

    def _get_node_limit_option_name(self):
        return "NodeLimit"

    def _get_gap_tolerance_option_name(self):
        return "MIPGap"

    def set_branching_priorities(self, priorities):
        from gurobipy import GRB
        for varname in priorities.keys():
            var = self._varname_to_var[varname]
            for (index, priority) in priorities[varname].items():
                gvar = self._pyomo_solver._pyomo_var_to_solver_var_map[var[index]]
                gvar.setAttr(GRB.Attr.BranchPriority, int(round(priority)))
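
`set_branching_priorities` expects the same nested `{varname: {index: value}}` layout used for warm starts and fixing. A sketch using the knapsack instance from the tests below, whose decision variable block is named "x"; the priority values are illustrative:

# Sketch: branching priorities with GurobiPyomoSolver. Priority values
# are illustrative; higher values are branched on earlier.
from miplearn.problems.knapsack import KnapsackInstance
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver

instance = KnapsackInstance(weights=[23., 26., 20., 18.],
                            prices=[505., 352., 458., 220.],
                            capacity=67.)
model = instance.to_model()

solver = GurobiPyomoSolver(use_lazy_callbacks=False,
                           options={"Threads": 4})
solver.set_instance(instance, model)
solver.set_branching_priorities({
    "x": {0: 10, 1: 5, 2: 0, 3: 0},
})
stats = solver.solve(tee=True)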
26
miplearn/solvers/tests/__init__.py
Normal file
@@ -0,0 +1,26 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import BasePyomoSolver, GurobiSolver, GurobiPyomoSolver, CplexPyomoSolver
from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance


def _get_instance(solver):
    if issubclass(solver, BasePyomoSolver):
        return KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        )
    if issubclass(solver, GurobiSolver):
        return GurobiKnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        )
    assert False


def _get_internal_solvers():
    return [GurobiPyomoSolver, CplexPyomoSolver, GurobiSolver]
115
miplearn/solvers/tests/test_internal_solver.py
Normal file
@@ -0,0 +1,115 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from io import StringIO

import pyomo.environ as pe
from miplearn import BasePyomoSolver
from miplearn.problems.knapsack import ChallengeA
from miplearn.solvers import RedirectOutput

from . import _get_instance, _get_internal_solvers

logger = logging.getLogger(__name__)


def test_redirect_output():
    import sys
    original_stdout = sys.stdout
    io = StringIO()
    with RedirectOutput([io]):
        print("Hello world")
    assert sys.stdout == original_stdout
    assert io.getvalue() == "Hello world\n"


def test_internal_solver_warm_starts():
    for solver_class in _get_internal_solvers():
        logger.info("Solver: %s" % solver_class)
        instance = _get_instance(solver_class)
        model = instance.to_model()
        solver = solver_class()
        solver.set_instance(instance, model)
        solver.set_warm_start({
            "x": {
                0: 1.0,
                1: 0.0,
                2: 0.0,
                3: 1.0,
            }
        })
        stats = solver.solve(tee=True)
        assert stats["Warm start value"] == 725.0

        solver.set_warm_start({
            "x": {
                0: 1.0,
                1: 1.0,
                2: 1.0,
                3: 1.0,
            }
        })
        stats = solver.solve(tee=True)
        assert stats["Warm start value"] is None

        solver.fix({
            "x": {
                0: 1.0,
                1: 0.0,
                2: 0.0,
                3: 1.0,
            }
        })
        stats = solver.solve(tee=True)
        assert stats["Lower bound"] == 725.0
        assert stats["Upper bound"] == 725.0


def test_internal_solver():
    for solver_class in _get_internal_solvers():
        logger.info("Solver: %s" % solver_class)

        instance = _get_instance(solver_class)
        model = instance.to_model()
        solver = solver_class()
        solver.set_instance(instance, model)

        stats = solver.solve_lp()
        assert round(stats["Optimal value"], 3) == 1287.923

        solution = solver.get_solution()
        assert round(solution["x"][0], 3) == 1.000
        assert round(solution["x"][1], 3) == 0.923
        assert round(solution["x"][2], 3) == 1.000
        assert round(solution["x"][3], 3) == 0.000

        stats = solver.solve(tee=True)
        assert len(stats["Log"]) > 100
        assert stats["Lower bound"] == 1183.0
        assert stats["Upper bound"] == 1183.0
        assert stats["Sense"] == "max"
        assert isinstance(stats["Wallclock time"], float)
        assert isinstance(stats["Nodes"], int)

        solution = solver.get_solution()
        assert solution["x"][0] == 1.0
        assert solution["x"][1] == 0.0
        assert solution["x"][2] == 1.0
        assert solution["x"][3] == 1.0

        if isinstance(solver, BasePyomoSolver):
            model.cut = pe.Constraint(expr=model.x[0] <= 0.5)
            solver.add_constraint(model.cut)
            solver.solve_lp()
            assert model.x[0].value == 0.5


# def test_node_count():
#     for solver in _get_internal_solvers():
#         challenge = ChallengeA()
#         solver.set_time_limit(1)
#         solver.set_instance(challenge.test_instances[0])
#         stats = solver.solve(tee=True)
#         assert stats["Nodes"] > 1
67
miplearn/solvers/tests/test_learning_solver.py
Normal file
@@ -0,0 +1,67 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
import pickle
import tempfile

from miplearn import LazyConstraintsComponent
from miplearn import LearningSolver

from . import _get_instance, _get_internal_solvers

logger = logging.getLogger(__name__)


def test_learning_solver():
    for mode in ["exact", "heuristic"]:
        for internal_solver in _get_internal_solvers():
            logger.info("Solver: %s" % internal_solver)
            instance = _get_instance(internal_solver)
            solver = LearningSolver(time_limit=300,
                                    gap_tolerance=1e-3,
                                    threads=1,
                                    solver=internal_solver,
                                    mode=mode)

            solver.solve(instance)
            assert instance.solution["x"][0] == 1.0
            assert instance.solution["x"][1] == 0.0
            assert instance.solution["x"][2] == 1.0
            assert instance.solution["x"][3] == 1.0
            assert instance.lower_bound == 1183.0
            assert instance.upper_bound == 1183.0
            assert round(instance.lp_solution["x"][0], 3) == 1.000
            assert round(instance.lp_solution["x"][1], 3) == 0.923
            assert round(instance.lp_solution["x"][2], 3) == 1.000
            assert round(instance.lp_solution["x"][3], 3) == 0.000
            assert round(instance.lp_value, 3) == 1287.923
            assert instance.found_violated_lazy_constraints == []
            assert instance.found_violated_user_cuts == []
            assert len(instance.solver_log) > 100

            solver.fit([instance])
            solver.solve(instance)

            # Assert solver is picklable
            with tempfile.TemporaryFile() as file:
                pickle.dump(solver, file)


def test_parallel_solve():
    for internal_solver in _get_internal_solvers():
        instances = [_get_instance(internal_solver) for _ in range(10)]
        solver = LearningSolver(solver=internal_solver)
        results = solver.parallel_solve(instances, n_jobs=3)
        assert len(results) == 10
        for instance in instances:
            assert len(instance.solution["x"].keys()) == 4


def test_add_components():
    solver = LearningSolver(components=[])
    solver.add(LazyConstraintsComponent())
    solver.add(LazyConstraintsComponent())
    assert len(solver.components) == 1
    assert "LazyConstraintsComponent" in solver.components
25
miplearn/tests/__init__.py
Normal file
@@ -0,0 +1,25 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance


def get_test_pyomo_instances():
    instances = [
        KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        ),
        KnapsackInstance(
            weights=[25., 30., 22., 18.],
            prices=[500., 365., 420., 150.],
            capacity=70.,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for i in range(len(instances)):
        solver.solve(instances[i], models[i])
    return instances, models
36
miplearn/tests/test_benchmark.py
Normal file
@@ -0,0 +1,36 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import os.path

from miplearn import LearningSolver, BenchmarkRunner
from miplearn.problems.stab import MaxWeightStableSetGenerator
from scipy.stats import randint


def test_benchmark():
    # Generate training and test instances
    train_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(5)
    test_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(3)

    # Training phase...
    training_solver = LearningSolver()
    training_solver.parallel_solve(train_instances, n_jobs=10)

    # Test phase...
    test_solvers = {
        "Strategy A": LearningSolver(),
        "Strategy B": LearningSolver(),
    }
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
    assert benchmark.raw_results().values.shape == (12, 16)

    benchmark.save_results("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")

    benchmark = BenchmarkRunner(test_solvers)
    benchmark.load_results("/tmp/benchmark.csv")
    assert benchmark.raw_results().values.shape == (12, 16)
62
miplearn/tests/test_extractors.py
Normal file
@@ -0,0 +1,62 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.problems.knapsack import KnapsackInstance
from miplearn import (LearningSolver,
                      SolutionExtractor,
                      InstanceFeaturesExtractor,
                      VariableFeaturesExtractor,
                      )
import numpy as np
import pyomo.environ as pe


def _get_instances():
    instances = [
        KnapsackInstance(weights=[1., 2., 3.],
                         prices=[10., 20., 30.],
                         capacity=2.5,
                         ),
        KnapsackInstance(weights=[3., 4., 5.],
                         prices=[20., 30., 40.],
                         capacity=4.5,
                         ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for (i, instance) in enumerate(instances):
        solver.solve(instances[i], models[i])
    return instances, models


def test_solution_extractor():
    instances, models = _get_instances()
    features = SolutionExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features.keys()
    assert isinstance(features["default"], np.ndarray)
    assert features["default"].shape == (6, 2)
    assert features["default"].ravel().tolist() == [
        1., 0.,
        0., 1.,
        1., 0.,
        1., 0.,
        0., 1.,
        1., 0.,
    ]


def test_instance_features_extractor():
    instances, models = _get_instances()
    features = InstanceFeaturesExtractor().extract(instances)
    assert features.shape == (2, 3)


def test_variable_features_extractor():
    instances, models = _get_instances()
    features = VariableFeaturesExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features
    assert features["default"].shape == (6, 5)