Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)
Reformat source code with Black; add pre-commit hooks and CI checks
.github/workflows/lint.yml (new file, vendored, +11 lines)
@@ -0,0 +1,11 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - uses: psf/black@stable
.pre-commit-config.yaml (new file, +6 lines)
@@ -0,0 +1,6 @@
+repos:
+  - repo: https://github.com/ambv/black
+    rev: stable
+    hooks:
+      - id: black
+        args: ["--check"]
Makefile (+3 lines)
@@ -34,6 +34,9 @@ install:
 uninstall:
 	$(PIP) uninstall miplearn
 
+reformat:
+	$(PYTHON) -m black miplearn
+
 test:
 	$(PYTEST) $(PYTEST_ARGS)
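Note: the new `reformat` target simply runs Black over the package. For readers unfamiliar with the tool, the sketch below shows the kind of rewrite Black applies throughout this commit (illustrative code only, not from this repository): double quotes, normalized float literals, and exploded argument lists with trailing commas.

    # Before: hand-wrapped call, single quotes, hanging indent
    result = solve(instance,
                   tee=False,
                   label='Solve (%s)' % name)

    # After `python -m black .`: one argument per line, trailing comma, double quotes
    result = solve(
        instance,
        tee=False,
        label="Solve (%s)" % name,
    )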
@@ -2,10 +2,12 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
-from .extractors import (SolutionExtractor,
-                         InstanceFeaturesExtractor,
-                         ObjectiveValueExtractor,
-                         VariableFeaturesExtractor)
+from .extractors import (
+    SolutionExtractor,
+    InstanceFeaturesExtractor,
+    ObjectiveValueExtractor,
+    VariableFeaturesExtractor,
+)
 
 from .components.component import Component
 from .components.objective import ObjectiveValueComponent
@@ -19,43 +19,53 @@ class BenchmarkRunner:
             assert isinstance(solver, LearningSolver)
         self.solvers = solvers
         self.results = None
 
     def solve(self, instances, tee=False):
         for (solver_name, solver) in self.solvers.items():
             for i in tqdm(range(len((instances)))):
                 results = solver.solve(deepcopy(instances[i]), tee=tee)
-                self._push_result(results, solver=solver, solver_name=solver_name, instance=i)
+                self._push_result(
+                    results,
+                    solver=solver,
+                    solver_name=solver_name,
+                    instance=i,
+                )
 
-    def parallel_solve(self,
-                       instances,
-                       n_jobs=1,
-                       n_trials=1,
-                       index_offset=0,
-                       ):
+    def parallel_solve(
+        self,
+        instances,
+        n_jobs=1,
+        n_trials=1,
+        index_offset=0,
+    ):
         self._silence_miplearn_logger()
         trials = instances * n_trials
         for (solver_name, solver) in self.solvers.items():
-            results = solver.parallel_solve(trials,
-                                            n_jobs=n_jobs,
-                                            label="Solve (%s)" % solver_name,
-                                            output=None)
+            results = solver.parallel_solve(
+                trials,
+                n_jobs=n_jobs,
+                label="Solve (%s)" % solver_name,
+                output=None,
+            )
             for i in range(len(trials)):
                 idx = (i % len(instances)) + index_offset
-                self._push_result(results[i],
-                                  solver=solver,
-                                  solver_name=solver_name,
-                                  instance=idx)
+                self._push_result(
+                    results[i],
+                    solver=solver,
+                    solver_name=solver_name,
+                    instance=idx,
+                )
         self._restore_miplearn_logger()
 
     def raw_results(self):
         return self.results
 
     def save_results(self, filename):
         self.results.to_csv(filename)
 
     def load_results(self, filename):
         self.results = pd.read_csv(filename, index_col=0)
 
     def load_state(self, filename):
         for (solver_name, solver) in self.solvers.items():
             solver.load_state(filename)
@@ -63,62 +73,69 @@ class BenchmarkRunner:
     def fit(self, training_instances):
         for (solver_name, solver) in self.solvers.items():
             solver.fit(training_instances)
 
     def _push_result(self, result, solver, solver_name, instance):
         if self.results is None:
-            self.results = pd.DataFrame(columns=["Solver",
-                                                 "Instance",
-                                                 "Wallclock Time",
-                                                 "Lower Bound",
-                                                 "Upper Bound",
-                                                 "Gap",
-                                                 "Nodes",
-                                                 "Mode",
-                                                 "Sense",
-                                                 "Predicted LB",
-                                                 "Predicted UB",
-                                                 ])
+            self.results = pd.DataFrame(
+                columns=[
+                    "Solver",
+                    "Instance",
+                    "Wallclock Time",
+                    "Lower Bound",
+                    "Upper Bound",
+                    "Gap",
+                    "Nodes",
+                    "Mode",
+                    "Sense",
+                    "Predicted LB",
+                    "Predicted UB",
+                ]
+            )
         lb = result["Lower bound"]
         ub = result["Upper bound"]
         gap = (ub - lb) / lb
         if "Predicted LB" not in result:
             result["Predicted LB"] = float("nan")
             result["Predicted UB"] = float("nan")
-        self.results = self.results.append({
-            "Solver": solver_name,
-            "Instance": instance,
-            "Wallclock Time": result["Wallclock time"],
-            "Lower Bound": lb,
-            "Upper Bound": ub,
-            "Gap": gap,
-            "Nodes": result["Nodes"],
-            "Mode": solver.mode,
-            "Sense": result["Sense"],
-            "Predicted LB": result["Predicted LB"],
-            "Predicted UB": result["Predicted UB"],
-        }, ignore_index=True)
+        self.results = self.results.append(
+            {
+                "Solver": solver_name,
+                "Instance": instance,
+                "Wallclock Time": result["Wallclock time"],
+                "Lower Bound": lb,
+                "Upper Bound": ub,
+                "Gap": gap,
+                "Nodes": result["Nodes"],
+                "Mode": solver.mode,
+                "Sense": result["Sense"],
+                "Predicted LB": result["Predicted LB"],
+                "Predicted UB": result["Predicted UB"],
+            },
+            ignore_index=True,
+        )
         groups = self.results.groupby("Instance")
         best_lower_bound = groups["Lower Bound"].transform("max")
         best_upper_bound = groups["Upper Bound"].transform("min")
         best_gap = groups["Gap"].transform("min")
         best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
         best_wallclock_time = groups["Wallclock Time"].transform("min")
-        self.results["Relative Lower Bound"] = \
-            self.results["Lower Bound"] / best_lower_bound
-        self.results["Relative Upper Bound"] = \
-            self.results["Upper Bound"] / best_upper_bound
-        self.results["Relative Wallclock Time"] = \
-            self.results["Wallclock Time"] / best_wallclock_time
-        self.results["Relative Gap"] = \
-            self.results["Gap"] / best_gap
-        self.results["Relative Nodes"] = \
-            self.results["Nodes"] / best_nodes
+        self.results["Relative Lower Bound"] = (
+            self.results["Lower Bound"] / best_lower_bound
+        )
+        self.results["Relative Upper Bound"] = (
+            self.results["Upper Bound"] / best_upper_bound
+        )
+        self.results["Relative Wallclock Time"] = (
+            self.results["Wallclock Time"] / best_wallclock_time
+        )
+        self.results["Relative Gap"] = self.results["Gap"] / best_gap
+        self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
 
     def save_chart(self, filename):
         import matplotlib.pyplot as plt
         import seaborn as sns
         from numpy import median
 
         sns.set_style("whitegrid")
         sns.set_palette("Blues_r")
         results = self.raw_results()
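Note: the relative metrics above rely on pandas' groupby/transform, which broadcasts a per-group aggregate back to every row. A standalone sketch of the same pattern, with made-up numbers:

    import pandas as pd

    df = pd.DataFrame(
        {
            "Instance": [0, 0, 1, 1],
            "Wallclock Time": [10.0, 5.0, 8.0, 4.0],
        }
    )
    # transform("min") returns a column aligned with df, holding each
    # instance's best (smallest) time, so a row-wise division yields each
    # solver's slowdown relative to the best solver on that instance.
    best = df.groupby("Instance")["Wallclock Time"].transform("min")
    df["Relative Wallclock Time"] = df["Wallclock Time"] / best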
@@ -134,71 +151,76 @@ class BenchmarkRunner:
             obj_column = "Lower Bound"
             predicted_obj_column = "Predicted LB"
 
-        fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
-                                                 ncols=4,
-                                                 figsize=(12,4),
-                                                 gridspec_kw={'width_ratios': [2, 1, 1, 2]})
-
+        fig, (ax1, ax2, ax3, ax4) = plt.subplots(
+            nrows=1,
+            ncols=4,
+            figsize=(12, 4),
+            gridspec_kw={"width_ratios": [2, 1, 1, 2]},
+        )
 
         # Figure 1: Solver x Wallclock Time
-        sns.stripplot(x="Solver",
-                      y="Wallclock Time",
-                      data=results,
-                      ax=ax1,
-                      jitter=0.25,
-                      size=4.0,
-                      )
-        sns.barplot(x="Solver",
-                    y="Wallclock Time",
-                    data=results,
-                    ax=ax1,
-                    errwidth=0.,
-                    alpha=0.4,
-                    estimator=median,
-                    )
-        ax1.set(ylabel='Wallclock Time (s)')
-
+        sns.stripplot(
+            x="Solver",
+            y="Wallclock Time",
+            data=results,
+            ax=ax1,
+            jitter=0.25,
+            size=4.0,
+        )
+        sns.barplot(
+            x="Solver",
+            y="Wallclock Time",
+            data=results,
+            ax=ax1,
+            errwidth=0.0,
+            alpha=0.4,
+            estimator=median,
+        )
+        ax1.set(ylabel="Wallclock Time (s)")
 
         # Figure 2: Solver x Gap (%)
         ax2.set_ylim(-0.5, 5.5)
-        sns.stripplot(x="Solver",
-                      y="Gap (%)",
-                      jitter=0.25,
-                      data=results[results["Mode"] != "heuristic"],
-                      ax=ax2,
-                      size=4.0,
-                      )
-
+        sns.stripplot(
+            x="Solver",
+            y="Gap (%)",
+            jitter=0.25,
+            data=results[results["Mode"] != "heuristic"],
+            ax=ax2,
+            size=4.0,
+        )
 
         # Figure 3: Solver x Primal Value
-        ax3.set_ylim(0.95,1.05)
-        sns.stripplot(x="Solver",
-                      y=primal_column,
-                      jitter=0.25,
-                      data=results[results["Mode"] == "heuristic"],
-                      ax=ax3,
-                      )
+        ax3.set_ylim(0.95, 1.05)
+        sns.stripplot(
+            x="Solver",
+            y=primal_column,
+            jitter=0.25,
+            data=results[results["Mode"] == "heuristic"],
+            ax=ax3,
+        )
 
         # Figure 4: Predicted vs Actual Objective Value
-        sns.scatterplot(x=obj_column,
-                        y=predicted_obj_column,
-                        hue="Solver",
-                        data=results[results["Mode"] != "heuristic"],
-                        ax=ax4,
-                        )
+        sns.scatterplot(
+            x=obj_column,
+            y=predicted_obj_column,
+            hue="Solver",
+            data=results[results["Mode"] != "heuristic"],
+            ax=ax4,
+        )
         xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
-        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc")
+        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls="-", color="#cccccc")
         ax4.set_xlim(xlim)
         ax4.set_ylim(ylim)
         ax4.get_legend().remove()
 
         fig.tight_layout()
-        plt.savefig(filename, bbox_inches='tight', dpi=150)
+        plt.savefig(filename, bbox_inches="tight", dpi=150)
 
     def _silence_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         self.prev_log_level = miplearn_logger.getEffectiveLevel()
         miplearn_logger.setLevel(logging.WARNING)
 
     def _restore_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         miplearn_logger.setLevel(self.prev_log_level)
@@ -22,9 +22,11 @@ class AdaptiveClassifier(Classifier):
     based on its cross-validation score on a particular training data set.
     """
 
-    def __init__(self,
-                 candidates=None,
-                 evaluator=ClassifierEvaluator()):
+    def __init__(
+        self,
+        candidates=None,
+        evaluator=ClassifierEvaluator(),
+    ):
         """
         Initializes the meta-classifier.
         """
@@ -35,14 +37,13 @@ class AdaptiveClassifier(Classifier):
                 "min samples": 100,
             },
             "logistic": {
-                "classifier": make_pipeline(StandardScaler(),
-                                            LogisticRegression()),
+                "classifier": make_pipeline(StandardScaler(), LogisticRegression()),
                 "min samples": 30,
             },
             "counting": {
                 "classifier": CountingClassifier(),
                 "min samples": 0,
-            }
+            },
         }
         self.candidates = candidates
         self.evaluator = evaluator
@@ -21,8 +21,7 @@ class CountingClassifier(Classifier):
         self.mean = np.mean(y_train)
 
     def predict_proba(self, x_test):
-        return np.array([[1 - self.mean, self.mean]
-                         for _ in range(x_test.shape[0])])
+        return np.array([[1 - self.mean, self.mean] for _ in range(x_test.shape[0])])
 
     def __repr__(self):
         return "CountingClassifier(mean=%s)" % self.mean
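Note: as the reformatted one-liner makes clearer, CountingClassifier ignores its features entirely; it memorizes the positive-label frequency seen during fit and repeats it for every test row. A quick usage sketch:

    import numpy as np

    clf = CountingClassifier()
    clf.fit(np.zeros((4, 3)), [0.0, 1.0, 1.0, 1.0])  # mean = 0.75
    # Every row gets the same [P(y=0), P(y=1)] = [0.25, 0.75]
    print(clf.predict_proba(np.zeros((2, 3))))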
@@ -11,6 +11,7 @@ from sklearn.linear_model import LogisticRegression
 from sklearn.model_selection import cross_val_score
 
 import logging
+
 logger = logging.getLogger(__name__)
@@ -28,12 +29,14 @@ class CrossValidatedClassifier(Classifier):
     acceptable. Other numbers are a linear interpolation of these two extremes.
     """
 
-    def __init__(self,
-                 classifier=LogisticRegression(),
-                 threshold=0.75,
-                 constant=0.0,
-                 cv=5,
-                 scoring='accuracy'):
+    def __init__(
+        self,
+        classifier=LogisticRegression(),
+        threshold=0.75,
+        constant=0.0,
+        cv=5,
+        scoring="accuracy",
+    ):
         self.classifier = None
         self.classifier_prototype = classifier
         self.constant = constant
@@ -45,24 +48,36 @@ class CrossValidatedClassifier(Classifier):
         # Calculate dummy score and absolute score threshold
         y_train_avg = np.average(y_train)
         dummy_score = max(y_train_avg, 1 - y_train_avg)
-        absolute_threshold = 1. * self.threshold + dummy_score * (1 - self.threshold)
+        absolute_threshold = 1.0 * self.threshold + dummy_score * (1 - self.threshold)
 
         # Calculate cross validation score and decide which classifier to use
         clf = deepcopy(self.classifier_prototype)
-        cv_score = float(np.mean(cross_val_score(clf,
-                                                 x_train,
-                                                 y_train,
-                                                 cv=self.cv,
-                                                 scoring=self.scoring)))
+        cv_score = float(
+            np.mean(
+                cross_val_score(
+                    clf,
+                    x_train,
+                    y_train,
+                    cv=self.cv,
+                    scoring=self.scoring,
+                )
+            )
+        )
         if cv_score >= absolute_threshold:
-            logger.debug("cv_score is above threshold (%.2f >= %.2f); keeping" %
-                         (cv_score, absolute_threshold))
+            logger.debug(
+                "cv_score is above threshold (%.2f >= %.2f); keeping"
+                % (cv_score, absolute_threshold)
+            )
             self.classifier = clf
         else:
-            logger.debug("cv_score is below threshold (%.2f < %.2f); discarding" %
-                         (cv_score, absolute_threshold))
-            self.classifier = DummyClassifier(strategy="constant",
-                                              constant=self.constant)
+            logger.debug(
+                "cv_score is below threshold (%.2f < %.2f); discarding"
+                % (cv_score, absolute_threshold)
+            )
+            self.classifier = DummyClassifier(
+                strategy="constant",
+                constant=self.constant,
+            )
 
         # Train chosen classifier
         self.classifier.fit(x_train, y_train)
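Note: the `absolute_threshold` above interpolates between a majority-class dummy score (threshold=0) and a perfect score (threshold=1). A worked example with made-up numbers:

    # threshold=0.75 and a dummy baseline of 0.60 require a cross-validation
    # score of 1.0 * 0.75 + 0.60 * (1 - 0.75) = 0.90 for the classifier to be
    # kept; anything lower falls back to DummyClassifier(strategy="constant").
    threshold, dummy_score = 0.75, 0.60
    absolute_threshold = 1.0 * threshold + dummy_score * (1 - threshold)
    print(absolute_threshold)  # 0.9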
@@ -12,7 +12,6 @@ E = 0.1
 def test_counting():
     clf = CountingClassifier()
     clf.fit(np.zeros((8, 25)), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
-    expected_proba = np.array([[0.375, 0.625],
-                               [0.375, 0.625]])
+    expected_proba = np.array([[0.375, 0.625], [0.375, 0.625]])
     actual_proba = clf.predict_proba(np.zeros((2, 25)))
     assert norm(actual_proba - expected_proba) < E
@@ -13,34 +13,36 @@ E = 0.1
 
 def test_cv():
     # Training set: label is true if point is inside a 2D circle
-    x_train = np.array([[x1, x2]
-                        for x1 in range(-10, 11)
-                        for x2 in range(-10, 11)])
+    x_train = np.array([[x1, x2] for x1 in range(-10, 11) for x2 in range(-10, 11)])
     x_train = StandardScaler().fit_transform(x_train)
     n_samples = x_train.shape[0]
 
-    y_train = np.array([1.0 if x1*x1 + x2*x2 <= 100 else 0.0
-                        for x1 in range(-10, 11)
-                        for x2 in range(-10, 11)])
+    y_train = np.array(
+        [
+            1.0 if x1 * x1 + x2 * x2 <= 100 else 0.0
+            for x1 in range(-10, 11)
+            for x2 in range(-10, 11)
+        ]
+    )
 
     # Support vector machines with linear kernels do not perform well on this
     # data set, so predictor should return the given constant.
-    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
-                                                  random_state=42),
-                                   threshold=0.90,
-                                   constant=0.0,
-                                   cv=30)
+    clf = CrossValidatedClassifier(
+        classifier=SVC(probability=True, random_state=42),
+        threshold=0.90,
+        constant=0.0,
+        cv=30,
+    )
     clf.fit(x_train, y_train)
     assert norm(np.zeros(n_samples) - clf.predict(x_train)) < E
 
     # Support vector machines with quadratic kernels perform almost perfectly
     # on this data set, so predictor should return their prediction.
-    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
-                                                  kernel='poly',
-                                                  degree=2,
-                                                  random_state=42),
-                                   threshold=0.90,
-                                   cv=30)
+    clf = CrossValidatedClassifier(
+        classifier=SVC(probability=True, kernel="poly", degree=2, random_state=42),
+        threshold=0.90,
+        cv=30,
+    )
     clf.fit(x_train, y_train)
     print(y_train - clf.predict(x_train))
     assert norm(y_train - clf.predict(x_train)) < E
@@ -17,4 +17,3 @@ def test_evaluator():
     ev = ClassifierEvaluator()
     assert ev.evaluate(clf_a, x_train, y_train) == 1.0
     assert ev.evaluate(clf_b, x_train, y_train) == 0.5
-
@@ -11,12 +11,16 @@ from miplearn.classifiers.threshold import MinPrecisionThreshold
 
 def test_threshold_dynamic():
     clf = Mock(spec=Classifier)
-    clf.predict_proba = Mock(return_value=np.array([
-        [0.10, 0.90],
-        [0.10, 0.90],
-        [0.20, 0.80],
-        [0.30, 0.70],
-    ]))
+    clf.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [0.10, 0.90],
+                [0.10, 0.90],
+                [0.20, 0.80],
+                [0.30, 0.70],
+            ]
+        )
+    )
     x_train = np.array([0, 1, 2, 3])
     y_train = np.array([1, 1, 0, 0])
@@ -31,4 +35,3 @@ def test_threshold_dynamic():
 
     threshold = MinPrecisionThreshold(min_precision=0.00)
     assert threshold.find(clf, x_train, y_train) == 0.70
-
@@ -30,11 +30,15 @@ class MinPrecisionThreshold(DynamicThreshold):
     def find(self, clf, x_train, y_train):
         proba = clf.predict_proba(x_train)
 
-        assert isinstance(proba, np.ndarray), \
-            "classifier should return numpy array"
-        assert proba.shape == (x_train.shape[0], 2), \
-            "classifier should return (%d,%d)-shaped array, not %s" % (
-                x_train.shape[0], 2, str(proba.shape))
+        assert isinstance(proba, np.ndarray), "classifier should return numpy array"
+        assert proba.shape == (
+            x_train.shape[0],
+            2,
+        ), "classifier should return (%d,%d)-shaped array, not %s" % (
+            x_train.shape[0],
+            2,
+            str(proba.shape),
+        )
 
         fps, tps, thresholds = _binary_clf_curve(y_train, proba[:, 1])
         precision = tps / (tps + fps)
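Note: MinPrecisionThreshold scans the classifier's score curve for the smallest cutoff that still meets the precision target. A rough standalone sketch of the same idea using scikit-learn's public precision_recall_curve instead of the private _binary_clf_curve used above:

    import numpy as np
    from sklearn.metrics import precision_recall_curve

    y_true = np.array([1, 1, 0, 0])
    scores = np.array([0.90, 0.90, 0.80, 0.70])
    precision, recall, thresholds = precision_recall_curve(y_true, scores)
    # Keep the lowest threshold whose precision meets the target, so as many
    # predictions as possible survive without violating the precision floor.
    min_precision = 0.98
    ok = [t for p, t in zip(precision[:-1], thresholds) if p >= min_precision]
    print(min(ok) if ok else None)  # 0.9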
@@ -9,15 +9,15 @@ class Component(ABC):
     """
     A Component is an object which adds functionality to a LearningSolver.
     """
 
     @abstractmethod
     def before_solve(self, solver, instance, model):
         pass
 
     @abstractmethod
     def after_solve(self, solver, instance, model, results):
         pass
 
     @abstractmethod
     def fit(self, training_instances):
         pass
@@ -18,10 +18,12 @@ class UserCutsComponent(Component):
     """
     A component that predicts which user cuts to enforce.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+    ):
         self.violations = set()
         self.count = {}
         self.n_samples = 0
@@ -40,7 +42,7 @@ class UserCutsComponent(Component):
 
     def after_solve(self, solver, instance, model, results):
         pass
 
     def fit(self, training_instances):
         logger.debug("Fitting...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -56,10 +58,11 @@ class UserCutsComponent(Component):
                     violation_to_instance_idx[v] = []
                 violation_to_instance_idx[v] += [idx]
 
-        for (v, classifier) in tqdm(self.classifiers.items(),
-                                    desc="Fit (user cuts)",
-                                    disable=not sys.stdout.isatty(),
-                                    ):
+        for (v, classifier) in tqdm(
+            self.classifiers.items(),
+            desc="Fit (user cuts)",
+            disable=not sys.stdout.isatty(),
+        ):
             logger.debug("Training: %s" % (str(v)))
             label = np.zeros(len(training_instances))
             label[violation_to_instance_idx[v]] = 1.0
@@ -79,10 +82,11 @@ class UserCutsComponent(Component):
         all_violations = set()
         for instance in instances:
             all_violations |= set(instance.found_violated_user_cuts)
-        for idx in tqdm(range(len(instances)),
-                        desc="Evaluate (lazy)",
-                        disable=not sys.stdout.isatty(),
-                        ):
+        for idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             instance = instances[idx]
             condition_positive = set(instance.found_violated_user_cuts)
             condition_negative = all_violations - condition_positive
@@ -18,10 +18,12 @@ class DynamicLazyConstraintsComponent(Component):
     """
     A component that predicts which lazy constraints to enforce.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+    ):
         self.violations = set()
         self.count = {}
         self.n_samples = 0
@@ -52,7 +54,7 @@ class DynamicLazyConstraintsComponent(Component):
 
     def after_solve(self, solver, instance, model, results):
         pass
 
     def fit(self, training_instances):
         logger.debug("Fitting...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -68,10 +70,11 @@ class DynamicLazyConstraintsComponent(Component):
                     violation_to_instance_idx[v] = []
                 violation_to_instance_idx[v] += [idx]
 
-        for (v, classifier) in tqdm(self.classifiers.items(),
-                                    desc="Fit (lazy)",
-                                    disable=not sys.stdout.isatty(),
-                                    ):
+        for (v, classifier) in tqdm(
+            self.classifiers.items(),
+            desc="Fit (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             logger.debug("Training: %s" % (str(v)))
             label = np.zeros(len(training_instances))
             label[violation_to_instance_idx[v]] = 1.0
@@ -91,10 +94,11 @@ class DynamicLazyConstraintsComponent(Component):
         all_violations = set()
         for instance in instances:
             all_violations |= set(instance.found_violated_lazy_constraints)
-        for idx in tqdm(range(len(instances)),
-                        desc="Evaluate (lazy)",
-                        disable=not sys.stdout.isatty(),
-                        ):
+        for idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             instance = instances[idx]
             condition_positive = set(instance.found_violated_lazy_constraints)
             condition_negative = all_violations - condition_positive
@@ -19,13 +19,14 @@ class LazyConstraint:
 
 
 class StaticLazyConstraintsComponent(Component):
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05,
-                 use_two_phase_gap=True,
-                 large_gap=1e-2,
-                 violation_tolerance=-0.5,
-                 ):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+        use_two_phase_gap=True,
+        large_gap=1e-2,
+        violation_tolerance=-0.5,
+    ):
         self.threshold = threshold
         self.classifier_prototype = classifier
         self.classifiers = {}
@@ -74,32 +75,38 @@ class StaticLazyConstraintsComponent(Component):
         logger.debug("Finding violated lazy constraints...")
         constraints_to_add = []
         for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(c.obj,
-                                                                  tol=self.violation_tolerance):
+            if not solver.internal_solver.is_constraint_satisfied(
+                c.obj, tol=self.violation_tolerance
+            ):
                 constraints_to_add.append(c)
         for c in constraints_to_add:
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
             instance.found_violated_lazy_constraints += [c.cid]
         if len(constraints_to_add) > 0:
-            logger.info("%8d lazy constraints added %8d in the pool" % (len(constraints_to_add), len(self.pool)))
+            logger.info(
+                "%8d lazy constraints added %8d in the pool"
+                % (len(constraints_to_add), len(self.pool))
+            )
             return True
         else:
             return False
 
     def fit(self, training_instances):
-        training_instances = [t
-                              for t in training_instances
-                              if hasattr(t, "found_violated_lazy_constraints")]
+        training_instances = [
+            t
+            for t in training_instances
+            if hasattr(t, "found_violated_lazy_constraints")
+        ]
 
         logger.debug("Extracting x and y...")
         x = self.x(training_instances)
         y = self.y(training_instances)
 
         logger.debug("Fitting...")
-        for category in tqdm(x.keys(),
-                             desc="Fit (lazy)",
-                             disable=not sys.stdout.isatty()):
+        for category in tqdm(
+            x.keys(), desc="Fit (lazy)", disable=not sys.stdout.isatty()
+        ):
             if category not in self.classifiers:
                 self.classifiers[category] = deepcopy(self.classifier_prototype)
             self.classifiers[category].fit(x[category], y[category])
@@ -121,8 +128,10 @@ class StaticLazyConstraintsComponent(Component):
                 x[category] = []
                 constraints[category] = []
             x[category] += [instance.get_constraint_features(cid)]
-            c = LazyConstraint(cid=cid,
-                               obj=solver.internal_solver.extract_constraint(cid))
+            c = LazyConstraint(
+                cid=cid,
+                obj=solver.internal_solver.extract_constraint(cid),
+            )
             constraints[category] += [c]
             self.pool.append(c)
         logger.info("%8d lazy constraints extracted" % len(self.pool))
@@ -141,7 +150,13 @@ class StaticLazyConstraintsComponent(Component):
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
             instance.found_violated_lazy_constraints += [c.cid]
-        logger.info("%8d lazy constraints added %8d in the pool" % (n_added, len(self.pool)))
+        logger.info(
+            "%8d lazy constraints added %8d in the pool"
+            % (
+                n_added,
+                len(self.pool),
+            )
+        )
 
     def _collect_constraints(self, train_instances):
         constraints = {}
@@ -1,13 +1,20 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from sklearn.metrics import mean_squared_error, explained_variance_score, max_error, mean_absolute_error, r2_score
+from sklearn.metrics import (
+    mean_squared_error,
+    explained_variance_score,
+    max_error,
+    mean_absolute_error,
+    r2_score,
+)
 
 from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
 from sklearn.linear_model import LinearRegression
 from copy import deepcopy
 import numpy as np
 import logging
+
 logger = logging.getLogger(__name__)
@@ -15,12 +22,12 @@ class ObjectiveValueComponent(Component):
     """
     A Component which predicts the optimal objective value of the problem.
     """
-    def __init__(self,
-                 regressor=LinearRegression()):
+
+    def __init__(self, regressor=LinearRegression()):
         self.ub_regressor = None
         self.lb_regressor = None
         self.regressor_prototype = regressor
 
     def before_solve(self, solver, instance, model):
         if self.ub_regressor is not None:
             logger.info("Predicting optimal value...")
@@ -28,7 +35,7 @@ class ObjectiveValueComponent(Component):
             instance.predicted_ub = ub
             instance.predicted_lb = lb
             logger.info("Predicted values: lb=%.2f, ub=%.2f" % (lb, ub))
 
     def after_solve(self, solver, instance, model, results):
         if self.ub_regressor is not None:
             results["Predicted UB"] = instance.predicted_ub
@@ -36,7 +43,7 @@ class ObjectiveValueComponent(Component):
         else:
             results["Predicted UB"] = None
             results["Predicted LB"] = None
 
     def fit(self, training_instances):
         logger.debug("Extracting features...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -50,7 +57,7 @@ class ObjectiveValueComponent(Component):
         self.ub_regressor.fit(features, ub.ravel())
         logger.debug("Fitting ub_regressor...")
         self.lb_regressor.fit(features, lb.ravel())
 
     def predict(self, instances):
         features = InstanceFeaturesExtractor().extract(instances)
         lb = self.lb_regressor.predict(features)
@@ -19,10 +19,12 @@ class PrimalSolutionComponent(Component):
     A component that predicts primal solutions.
     """
 
-    def __init__(self,
-                 classifier=AdaptiveClassifier(),
-                 mode="exact",
-                 threshold=MinPrecisionThreshold(0.98)):
+    def __init__(
+        self,
+        classifier=AdaptiveClassifier(),
+        mode="exact",
+        threshold=MinPrecisionThreshold(0.98),
+    ):
         self.mode = mode
         self.classifiers = {}
         self.thresholds = {}
@@ -51,9 +53,10 @@ class PrimalSolutionComponent(Component):
         features = VariableFeaturesExtractor().extract(training_instances)
         solutions = SolutionExtractor().extract(training_instances)
 
-        for category in tqdm(features.keys(),
-                             desc="Fit (primal)",
-                             ):
+        for category in tqdm(
+            features.keys(),
+            desc="Fit (primal)",
+        ):
             x_train = features[category]
             for label in [0, 1]:
                 y_train = solutions[category][:, label].astype(int)
@@ -74,9 +77,15 @@ class PrimalSolutionComponent(Component):
 
             # Find threshold (dynamic or static)
             if isinstance(self.threshold_prototype, DynamicThreshold):
-                self.thresholds[category, label] = self.threshold_prototype.find(clf, x_train, y_train)
+                self.thresholds[category, label] = self.threshold_prototype.find(
+                    clf,
+                    x_train,
+                    y_train,
+                )
             else:
-                self.thresholds[category, label] = deepcopy(self.threshold_prototype)
+                self.thresholds[category, label] = deepcopy(
+                    self.threshold_prototype
+                )
 
             self.classifiers[category, label] = clf
@@ -98,18 +107,21 @@ class PrimalSolutionComponent(Component):
                 ws = np.array([[1 - clf, clf] for _ in range(n)])
             else:
                 ws = clf.predict_proba(x_test[category])
-            assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (n, ws.shape)
+            assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (
+                n,
+                ws.shape,
+            )
             for (i, (var, index)) in enumerate(var_split[category]):
                 if ws[i, 1] >= self.thresholds[category, label]:
                     solution[var][index] = label
         return solution
 
     def evaluate(self, instances):
-        ev = {"Fix zero": {},
-              "Fix one": {}}
-        for instance_idx in tqdm(range(len(instances)),
-                                 desc="Evaluate (primal)",
-                                 ):
+        ev = {"Fix zero": {}, "Fix one": {}}
+        for instance_idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (primal)",
+        ):
             instance = instances[instance_idx]
             solution_actual = instance.solution
             solution_pred = self.predict(instance)
@@ -143,6 +155,10 @@ class PrimalSolutionComponent(Component):
             tn_one = len(pred_one_negative & vars_zero)
             fn_one = len(pred_one_negative & vars_one)
 
-            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(tp_zero, tn_zero, fp_zero, fn_zero)
-            ev["Fix one"][instance_idx] = classifier_evaluation_dict(tp_one, tn_one, fp_one, fn_one)
+            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(
+                tp_zero, tn_zero, fp_zero, fn_zero
+            )
+            ev["Fix one"][instance_idx] = classifier_evaluation_dict(
+                tp_one, tn_one, fp_one, fn_one
+            )
         return ev
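Note: classifier_evaluation_dict (used above) reduces to standard confusion-matrix arithmetic. A minimal sketch with a hypothetical helper of the same shape; the printed numbers match the "Fix one" case in the primal-component test later in this diff:

    def evaluation_dict(tp, tn, fp, fn):
        # Assumes at least one predicted positive and one actual positive,
        # so the denominators below are nonzero.
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        return {
            "Accuracy": (tp + tn) / (tp + tn + fp + fn),
            "Precision": precision,
            "Recall": recall,
            "F1 score": 2 * precision * recall / (precision + recall),
        }

    print(evaluation_dict(tp=1, tn=1, fp=0, fn=2))
    # {'Accuracy': 0.5, 'Precision': 1.0, 'Recall': 0.333..., 'F1 score': 0.5}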
@@ -51,14 +51,15 @@ class RelaxationComponent(Component):
     If `check_dropped` is true, set the maximum number of iterations in the lazy constraint loop.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.95,
-                 slack_tolerance=1e-5,
-                 check_dropped=False,
-                 violation_tolerance=1e-5,
-                 max_iterations=3,
-                 ):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.95,
+        slack_tolerance=1e-5,
+        check_dropped=False,
+        violation_tolerance=1e-5,
+        max_iterations=3,
+    ):
         self.classifiers = {}
         self.classifier_prototype = classifier
         self.threshold = threshold
@@ -77,16 +78,20 @@ class RelaxationComponent(Component):
 
         logger.info("Predicting redundant LP constraints...")
         cids = solver.internal_solver.get_constraint_ids()
-        x, constraints = self.x([instance],
-                                constraint_ids=cids,
-                                return_constraints=True)
+        x, constraints = self.x(
+            [instance],
+            constraint_ids=cids,
+            return_constraints=True,
+        )
         y = self.predict(x)
         for category in y.keys():
             for i in range(len(y[category])):
                 if y[category][i][0] == 1:
                     cid = constraints[category][i]
-                    c = LazyConstraint(cid=cid,
-                                       obj=solver.internal_solver.extract_constraint(cid))
+                    c = LazyConstraint(
+                        cid=cid,
+                        obj=solver.internal_solver.extract_constraint(cid),
+                    )
                     self.pool += [c]
         logger.info("Extracted %d predicted constraints" % len(self.pool))
@@ -98,21 +103,19 @@ class RelaxationComponent(Component):
         x = self.x(training_instances)
         y = self.y(training_instances)
         logger.debug("Fitting...")
-        for category in tqdm(x.keys(),
-                             desc="Fit (relaxation)"):
+        for category in tqdm(x.keys(), desc="Fit (relaxation)"):
             if category not in self.classifiers:
                 self.classifiers[category] = deepcopy(self.classifier_prototype)
             self.classifiers[category].fit(x[category], y[category])
 
-    def x(self,
-          instances,
-          constraint_ids=None,
-          return_constraints=False):
+    def x(self, instances, constraint_ids=None, return_constraints=False):
         x = {}
         constraints = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (relaxation:x)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (relaxation:x)",
+            disable=len(instances) < 5,
+        ):
             if constraint_ids is not None:
                 cids = constraint_ids
             else:
@@ -133,9 +136,11 @@ class RelaxationComponent(Component):
 
     def y(self, instances):
         y = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (relaxation:y)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (relaxation:y)",
+            disable=len(instances) < 5,
+        ):
             for (cid, slack) in instance.slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
@@ -154,7 +159,7 @@ class RelaxationComponent(Component):
             if category not in self.classifiers:
                 continue
             y[category] = []
-            #x_cat = np.array(x_cat)
+            # x_cat = np.array(x_cat)
             proba = self.classifiers[category].predict_proba(x_cat)
             for i in range(len(proba)):
                 if proba[i][1] >= self.threshold:
@@ -191,13 +196,19 @@ class RelaxationComponent(Component):
             logger.debug("Checking that dropped constraints are satisfied...")
             constraints_to_add = []
             for c in self.pool:
-                if not solver.internal_solver.is_constraint_satisfied(c.obj, self.violation_tolerance):
+                if not solver.internal_solver.is_constraint_satisfied(
+                    c.obj,
+                    self.violation_tolerance,
+                ):
                     constraints_to_add.append(c)
             for c in constraints_to_add:
                 self.pool.remove(c)
                 solver.internal_solver.add_constraint(c.obj)
             if len(constraints_to_add) > 0:
-                logger.info("%8d constraints %8d in the pool" % (len(constraints_to_add), len(self.pool)))
+                logger.info(
+                    "%8d constraints %8d in the pool"
+                    % (len(constraints_to_add), len(self.pool))
+                )
                 return True
             else:
                 return False
@@ -28,9 +28,9 @@ def test_lazy_fit():
     assert "c" in component.classifiers
 
     # Should provide correct x_train to each classifier
-    expected_x_train_a = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
-    expected_x_train_b = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
-    expected_x_train_c = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
+    expected_x_train_a = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
+    expected_x_train_b = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
+    expected_x_train_c = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
     actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
     actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
     actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
@@ -56,16 +56,15 @@ def test_lazy_before():
     solver = LearningSolver()
     solver.internal_solver = Mock(spec=InternalSolver)
     component = DynamicLazyConstraintsComponent(threshold=0.10)
-    component.classifiers = {"a": Mock(spec=Classifier),
-                             "b": Mock(spec=Classifier)}
+    component.classifiers = {"a": Mock(spec=Classifier), "b": Mock(spec=Classifier)}
     component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
     component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])
 
     component.before_solve(solver, instances[0], models[0])
 
     # Should ask classifier likelihood of each constraint being violated
-    expected_x_test_a = np.array([[67., 21.75, 1287.92]])
-    expected_x_test_b = np.array([[67., 21.75, 1287.92]])
+    expected_x_test_a = np.array([[67.0, 21.75, 1287.92]])
+    expected_x_test_b = np.array([[67.0, 21.75, 1287.92]])
     actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
     actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
     assert norm(expected_x_test_a - actual_x_test_a) < E
@@ -82,13 +81,15 @@ def test_lazy_before():
 def test_lazy_evaluate():
     instances, models = get_test_pyomo_instances()
     component = DynamicLazyConstraintsComponent()
-    component.classifiers = {"a": Mock(spec=Classifier),
-                             "b": Mock(spec=Classifier),
-                             "c": Mock(spec=Classifier)}
+    component.classifiers = {
+        "a": Mock(spec=Classifier),
+        "b": Mock(spec=Classifier),
+        "c": Mock(spec=Classifier),
+    }
     component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
     component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
     component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])
 
     instances[0].found_violated_lazy_constraints = ["a", "b", "c"]
     instances[1].found_violated_lazy_constraints = ["b", "d"]
     assert component.evaluate(instances) == {
@@ -96,7 +97,7 @@ def test_lazy_evaluate():
             "Accuracy": 0.75,
             "F1 score": 0.8,
             "Precision": 1.0,
-            "Recall": 2/3.,
+            "Recall": 2 / 3.0,
             "Predicted positive": 2,
             "Predicted negative": 2,
             "Condition positive": 3,
@@ -135,6 +136,5 @@ def test_lazy_evaluate():
             "False positive (%)": 25.0,
             "True negative (%)": 25.0,
             "True positive (%)": 25.0,
-        }
+        },
     }
-
@@ -4,10 +4,12 @@
 
 from unittest.mock import Mock, call
 
-from miplearn import (StaticLazyConstraintsComponent,
-                      LearningSolver,
-                      Instance,
-                      InternalSolver)
+from miplearn import (
+    StaticLazyConstraintsComponent,
+    LearningSolver,
+    Instance,
+    InternalSolver,
+)
 from miplearn.classifiers import Classifier
@@ -23,39 +25,47 @@ def test_usage_with_solver():
 
     instance = Mock(spec=Instance)
     instance.has_static_lazy_constraints = Mock(return_value=True)
-    instance.is_constraint_lazy = Mock(side_effect=lambda cid: {
-        "c1": False,
-        "c2": True,
-        "c3": True,
-        "c4": True,
-    }[cid])
-    instance.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c2": [1.0, 0.0],
-        "c3": [0.5, 0.5],
-        "c4": [1.0],
-    }[cid])
-    instance.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-    }[cid])
+    instance.is_constraint_lazy = Mock(
+        side_effect=lambda cid: {
+            "c1": False,
+            "c2": True,
+            "c3": True,
+            "c4": True,
+        }[cid]
+    )
+    instance.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": [1.0, 0.0],
+            "c3": [0.5, 0.5],
+            "c4": [1.0],
+        }[cid]
+    )
+    instance.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
 
-    component = StaticLazyConstraintsComponent(threshold=0.90,
-                                               use_two_phase_gap=False,
-                                               violation_tolerance=1.0)
+    component = StaticLazyConstraintsComponent(
+        threshold=0.90, use_two_phase_gap=False, violation_tolerance=1.0
+    )
     component.classifiers = {
         "type-a": Mock(spec=Classifier),
         "type-b": Mock(spec=Classifier),
     }
-    component.classifiers["type-a"].predict_proba = \
-        Mock(return_value=[
-            [0.20, 0.80],
-            [0.05, 0.95],
-        ])
-    component.classifiers["type-b"].predict_proba = \
-        Mock(return_value=[
-            [0.02, 0.98],
-        ])
+    component.classifiers["type-a"].predict_proba = Mock(
+        return_value=[
+            [0.20, 0.80],
+            [0.05, 0.95],
+        ]
+    )
+    component.classifiers["type-b"].predict_proba = Mock(
+        return_value=[
+            [0.02, 0.98],
+        ]
+    )
 
     # LearningSolver calls before_solve
     component.before_solve(solver, instance, None)
@@ -67,37 +77,59 @@ def test_usage_with_solver():
     internal.get_constraint_ids.assert_called_once()
 
     # Should ask if each constraint in the model is lazy
-    instance.is_constraint_lazy.assert_has_calls([
-        call("c1"), call("c2"), call("c3"), call("c4"),
-    ])
+    instance.is_constraint_lazy.assert_has_calls(
+        [
+            call("c1"),
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # For the lazy ones, should ask for features
-    instance.get_constraint_features.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    instance.get_constraint_features.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should also ask for categories
     assert instance.get_constraint_category.call_count == 3
-    instance.get_constraint_category.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    instance.get_constraint_category.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should ask internal solver to remove constraints identified as lazy
     assert internal.extract_constraint.call_count == 3
-    internal.extract_constraint.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    internal.extract_constraint.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should ask ML to predict whether each lazy constraint should be enforced
-    component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
+    component.classifiers["type-a"].predict_proba.assert_called_once_with(
+        [[1.0, 0.0], [0.5, 0.5]]
+    )
     component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
 
     # For the ones that should be enforced, should ask solver to re-add them
     # to the formulation. The remaining ones should remain in the pool.
     assert internal.add_constraint.call_count == 2
-    internal.add_constraint.assert_has_calls([
-        call("<c3>"), call("<c4>"),
-    ])
+    internal.add_constraint.assert_has_calls(
+        [
+            call("<c3>"),
+            call("<c4>"),
+        ]
+    )
     internal.add_constraint.reset_mock()
 
     # LearningSolver calls after_iteration (first time)
@@ -126,37 +158,45 @@ def test_usage_with_solver():
 def test_fit():
     instance_1 = Mock(spec=Instance)
     instance_1.found_violated_lazy_constraints = ["c1", "c2", "c4", "c5"]
-    instance_1.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": "type-a",
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-        "c5": "type-b",
-    }[cid])
-    instance_1.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c1": [1, 1],
-        "c2": [1, 2],
-        "c3": [1, 3],
-        "c4": [1, 4, 0],
-        "c5": [1, 5, 0],
-    }[cid])
+    instance_1.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": "type-a",
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+            "c5": "type-b",
+        }[cid]
+    )
+    instance_1.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c1": [1, 1],
+            "c2": [1, 2],
+            "c3": [1, 3],
+            "c4": [1, 4, 0],
+            "c5": [1, 5, 0],
+        }[cid]
+    )
 
     instance_2 = Mock(spec=Instance)
     instance_2.found_violated_lazy_constraints = ["c2", "c3", "c4"]
-    instance_2.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": "type-a",
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-        "c5": "type-b",
-    }[cid])
-    instance_2.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c1": [2, 1],
-        "c2": [2, 2],
-        "c3": [2, 3],
-        "c4": [2, 4, 0],
-        "c5": [2, 5, 0],
-    }[cid])
+    instance_2.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": "type-a",
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+            "c5": "type-b",
+        }[cid]
+    )
+    instance_2.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c1": [2, 1],
+            "c2": [2, 2],
+            "c3": [2, 3],
+            "c4": [2, 4, 0],
+            "c5": [2, 5, 0],
+        }[cid]
+    )
 
     instances = [instance_1, instance_2]
     component = StaticLazyConstraintsComponent()
@@ -171,18 +211,22 @@ def test_fit():
     }
     expected_x = {
         "type-a": [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]],
-        "type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]]
+        "type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]],
     }
     expected_y = {
         "type-a": [[0, 1], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
-        "type-b": [[0, 1], [0, 1], [0, 1], [1, 0]]
+        "type-b": [[0, 1], [0, 1], [0, 1], [1, 0]],
     }
     assert component._collect_constraints(instances) == expected_constraints
     assert component.x(instances) == expected_x
     assert component.y(instances) == expected_y
 
     component.fit(instances)
-    component.classifiers["type-a"].fit.assert_called_once_with(expected_x["type-a"],
-                                                                expected_y["type-a"])
-    component.classifiers["type-b"].fit.assert_called_once_with(expected_x["type-b"],
-                                                                expected_y["type-b"])
+    component.classifiers["type-a"].fit.assert_called_once_with(
+        expected_x["type-a"],
+        expected_y["type-a"],
+    )
+    component.classifiers["type-b"].fit.assert_called_once_with(
+        expected_x["type-b"],
+        expected_y["type-b"],
+    )
@@ -16,8 +16,10 @@ def test_usage():
     comp.fit(instances)
     assert instances[0].lower_bound == 1183.0
     assert instances[0].upper_bound == 1183.0
-    assert np.round(comp.predict(instances), 2).tolist() == [[1183.0, 1183.0],
-                                                             [1070.0, 1070.0]]
+    assert np.round(comp.predict(instances), 2).tolist() == [
+        [1183.0, 1183.0],
+        [1070.0, 1070.0],
+    ]
 
 
 def test_obj_evaluate():
@@ -28,20 +30,20 @@ def test_obj_evaluate():
     comp.fit(instances)
     ev = comp.evaluate(instances)
     assert ev == {
-        'Lower bound': {
-            'Explained variance': 0.0,
-            'Max error': 183.0,
-            'Mean absolute error': 126.5,
-            'Mean squared error': 19194.5,
-            'Median absolute error': 126.5,
-            'R2': -5.012843605607331,
+        "Lower bound": {
+            "Explained variance": 0.0,
+            "Max error": 183.0,
+            "Mean absolute error": 126.5,
+            "Mean squared error": 19194.5,
+            "Median absolute error": 126.5,
+            "R2": -5.012843605607331,
         },
-        'Upper bound': {
-            'Explained variance': 0.0,
-            'Max error': 183.0,
-            'Mean absolute error': 126.5,
-            'Mean squared error': 19194.5,
-            'Median absolute error': 126.5,
-            'R2': -5.012843605607331,
-        }
+        "Upper bound": {
+            "Explained variance": 0.0,
+            "Max error": 183.0,
+            "Mean absolute error": 126.5,
+            "Mean squared error": 19194.5,
+            "Median absolute error": 126.5,
+            "R2": -5.012843605607331,
+        },
     }
@@ -25,71 +25,82 @@ def test_predict():
 def test_evaluate():
     instances, models = get_test_pyomo_instances()
     clf_zero = Mock(spec=Classifier)
-    clf_zero.predict_proba = Mock(return_value=np.array([
-        [0., 1.],  # x[0]
-        [0., 1.],  # x[1]
-        [1., 0.],  # x[2]
-        [1., 0.],  # x[3]
-    ]))
+    clf_zero.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [0.0, 1.0],  # x[0]
+                [0.0, 1.0],  # x[1]
+                [1.0, 0.0],  # x[2]
+                [1.0, 0.0],  # x[3]
+            ]
+        )
+    )
     clf_one = Mock(spec=Classifier)
-    clf_one.predict_proba = Mock(return_value=np.array([
-        [1., 0.],  # x[0] instances[0]
-        [1., 0.],  # x[1] instances[0]
-        [0., 1.],  # x[2] instances[0]
-        [1., 0.],  # x[3] instances[0]
-    ]))
-    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
-                                   threshold=0.50)
+    clf_one.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [1.0, 0.0],  # x[0] instances[0]
+                [1.0, 0.0],  # x[1] instances[0]
+                [0.0, 1.0],  # x[2] instances[0]
+                [1.0, 0.0],  # x[3] instances[0]
+            ]
+        )
+    )
+    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one], threshold=0.50)
     comp.fit(instances[:1])
-    assert comp.predict(instances[0]) == {"x": {0: 0,
-                                                1: 0,
-                                                2: 1,
-                                                3: None}}
-    assert instances[0].solution == {"x": {0: 1,
-                                           1: 0,
-                                           2: 1,
-                                           3: 1}}
+    assert comp.predict(instances[0]) == {"x": {0: 0, 1: 0, 2: 1, 3: None}}
+    assert instances[0].solution == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
     ev = comp.evaluate(instances[:1])
-    assert ev == {'Fix one': {0: {'Accuracy': 0.5,
-                                  'Condition negative': 1,
-                                  'Condition negative (%)': 25.0,
-                                  'Condition positive': 3,
-                                  'Condition positive (%)': 75.0,
-                                  'F1 score': 0.5,
-                                  'False negative': 2,
-                                  'False negative (%)': 50.0,
-                                  'False positive': 0,
-                                  'False positive (%)': 0.0,
-                                  'Precision': 1.0,
-                                  'Predicted negative': 3,
-                                  'Predicted negative (%)': 75.0,
-                                  'Predicted positive': 1,
-                                  'Predicted positive (%)': 25.0,
-                                  'Recall': 0.3333333333333333,
-                                  'True negative': 1,
-                                  'True negative (%)': 25.0,
-                                  'True positive': 1,
-                                  'True positive (%)': 25.0}},
-                  'Fix zero': {0: {'Accuracy': 0.75,
-                                   'Condition negative': 3,
-                                   'Condition negative (%)': 75.0,
-                                   'Condition positive': 1,
-                                   'Condition positive (%)': 25.0,
-                                   'F1 score': 0.6666666666666666,
-                                   'False negative': 0,
-                                   'False negative (%)': 0.0,
-                                   'False positive': 1,
-                                   'False positive (%)': 25.0,
-                                   'Precision': 0.5,
-                                   'Predicted negative': 2,
-                                   'Predicted negative (%)': 50.0,
-                                   'Predicted positive': 2,
-                                   'Predicted positive (%)': 50.0,
-                                   'Recall': 1.0,
-                                   'True negative': 2,
-                                   'True negative (%)': 50.0,
-                                   'True positive': 1,
-                                   'True positive (%)': 25.0}}}
+    assert ev == {
+        "Fix one": {
+            0: {
+                "Accuracy": 0.5,
+                "Condition negative": 1,
+                "Condition negative (%)": 25.0,
+                "Condition positive": 3,
+                "Condition positive (%)": 75.0,
+                "F1 score": 0.5,
+                "False negative": 2,
+                "False negative (%)": 50.0,
+                "False positive": 0,
+                "False positive (%)": 0.0,
+                "Precision": 1.0,
+                "Predicted negative": 3,
+                "Predicted negative (%)": 75.0,
+                "Predicted positive": 1,
+                "Predicted positive (%)": 25.0,
+                "Recall": 0.3333333333333333,
+                "True negative": 1,
+                "True negative (%)": 25.0,
+                "True positive": 1,
+                "True positive (%)": 25.0,
+            }
+        },
+        "Fix zero": {
+            0: {
+                "Accuracy": 0.75,
+                "Condition negative": 3,
+                "Condition negative (%)": 75.0,
+                "Condition positive": 1,
+                "Condition positive (%)": 25.0,
+                "F1 score": 0.6666666666666666,
+                "False negative": 0,
+                "False negative (%)": 0.0,
+                "False positive": 1,
+                "False positive (%)": 25.0,
+                "Precision": 0.5,
+                "Predicted negative": 2,
+                "Predicted negative (%)": 50.0,
+                "Predicted positive": 2,
+                "Predicted positive (%)": 50.0,
+                "Recall": 1.0,
+                "True negative": 2,
+                "True negative (%)": 50.0,
+                "True positive": 1,
+                "True positive (%)": 25.0,
+            }
+        },
+    }
 
 
 def test_primal_parallel_fit():
@@ -4,10 +4,7 @@
 
 from unittest.mock import Mock, call
 
-from miplearn import (RelaxationComponent,
-                      LearningSolver,
-                      Instance,
-                      InternalSolver)
+from miplearn import RelaxationComponent, LearningSolver, Instance, InternalSolver
 from miplearn.classifiers import Classifier
@@ -16,41 +13,49 @@ def _setup():
 
     internal = solver.internal_solver = Mock(spec=InternalSolver)
     internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
-    internal.get_constraint_slacks = Mock(side_effect=lambda: {
-        "c1": 0.5,
-        "c2": 0.0,
-        "c3": 0.0,
-        "c4": 1.4,
-    })
+    internal.get_constraint_slacks = Mock(
+        side_effect=lambda: {
+            "c1": 0.5,
+            "c2": 0.0,
+            "c3": 0.0,
+            "c4": 1.4,
+        }
+    )
     internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
     internal.is_constraint_satisfied = Mock(return_value=False)
 
     instance = Mock(spec=Instance)
-    instance.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c2": [1.0, 0.0],
-        "c3": [0.5, 0.5],
-        "c4": [1.0],
-    }[cid])
-    instance.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": None,
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-    }[cid])
+    instance.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": [1.0, 0.0],
+            "c3": [0.5, 0.5],
+            "c4": [1.0],
+        }[cid]
+    )
+    instance.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": None,
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
 
     classifiers = {
         "type-a": Mock(spec=Classifier),
         "type-b": Mock(spec=Classifier),
     }
-    classifiers["type-a"].predict_proba = \
-        Mock(return_value=[
-            [0.20, 0.80],
-            [0.05, 0.95],
-        ])
-    classifiers["type-b"].predict_proba = \
-        Mock(return_value=[
-            [0.02, 0.98],
-        ])
+    classifiers["type-a"].predict_proba = Mock(
+        return_value=[
+            [0.20, 0.80],
+            [0.05, 0.95],
+        ]
+    )
+    classifiers["type-b"].predict_proba = Mock(
+        return_value=[
+            [0.02, 0.98],
+        ]
+    )
 
     return solver, internal, instance, classifiers
@@ -72,25 +77,39 @@ def test_usage():
|
||||
|
||||
# Should query category and features for each constraint in the model
|
||||
assert instance.get_constraint_category.call_count == 4
|
||||
instance.get_constraint_category.assert_has_calls([
|
||||
call("c1"), call("c2"), call("c3"), call("c4"),
|
||||
])
|
||||
instance.get_constraint_category.assert_has_calls(
|
||||
[
|
||||
call("c1"),
|
||||
call("c2"),
|
||||
call("c3"),
|
||||
call("c4"),
|
||||
]
|
||||
)
|
||||
|
||||
# For constraint with non-null categories, should ask for features
|
||||
assert instance.get_constraint_features.call_count == 3
|
||||
instance.get_constraint_features.assert_has_calls([
|
||||
call("c2"), call("c3"), call("c4"),
|
||||
])
|
||||
instance.get_constraint_features.assert_has_calls(
|
||||
[
|
||||
call("c2"),
|
||||
call("c3"),
|
||||
call("c4"),
|
||||
]
|
||||
)
|
||||
|
||||
# Should ask ML to predict whether constraint should be removed
|
||||
component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
|
||||
component.classifiers["type-a"].predict_proba.assert_called_once_with(
|
||||
[[1.0, 0.0], [0.5, 0.5]]
|
||||
)
|
||||
component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
|
||||
|
||||
# Should ask internal solver to remove constraints predicted as redundant
|
||||
assert internal.extract_constraint.call_count == 2
|
||||
internal.extract_constraint.assert_has_calls([
|
||||
call("c3"), call("c4"),
|
||||
])
|
||||
internal.extract_constraint.assert_has_calls(
|
||||
[
|
||||
call("c3"),
|
||||
call("c4"),
|
||||
]
|
||||
)
|
||||
|
||||
# LearningSolver calls after_solve
|
||||
component.after_solve(solver, instance, None, None)
@@ -111,8 +130,7 @@ def test_usage():
def test_usage_with_check_dropped():
    solver, internal, instance, classifiers = _setup()

    component = RelaxationComponent(check_dropped=True,
                                    violation_tolerance=1e-3)
    component = RelaxationComponent(check_dropped=True, violation_tolerance=1e-3)
    component.classifiers = classifiers

    # LearningSolver calls before_solve
@@ -120,9 +138,12 @@ def test_usage_with_check_dropped():

    # Assert constraints are extracted
    assert internal.extract_constraint.call_count == 2
    internal.extract_constraint.assert_has_calls([
        call("c3"), call("c4"),
    ])
    internal.extract_constraint.assert_has_calls(
        [
            call("c3"),
            call("c4"),
        ]
    )

    # LearningSolver calls iteration_cb (first time)
    should_repeat = component.iteration_cb(solver, instance, None)
@@ -131,15 +152,15 @@ def test_usage_with_check_dropped():
    assert should_repeat

    # Should ask solver if removed constraints are satisfied (mock always returns false)
    internal.is_constraint_satisfied.assert_has_calls([
        call("<c3>", 1e-3),
        call("<c4>", 1e-3),
    ])
    internal.is_constraint_satisfied.assert_has_calls(
        [
            call("<c3>", 1e-3),
            call("<c4>", 1e-3),
        ]
    )

    # Should add constraints back to LP relaxation
    internal.add_constraint.assert_has_calls([
        call("<c3>"), call("<c4>")
    ])
    internal.add_constraint.assert_has_calls([call("<c3>"), call("<c4>")])

    # LearningSolver calls iteration_cb (second time)
    should_repeat = component.iteration_cb(solver, instance, None)
@@ -148,21 +169,22 @@ def test_usage_with_check_dropped():

def test_x_y_fit_predict_evaluate():
    instances = [Mock(spec=Instance), Mock(spec=Instance)]
    component = RelaxationComponent(slack_tolerance=0.05,
                                    threshold=0.80)
    component = RelaxationComponent(slack_tolerance=0.05, threshold=0.80)
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    component.classifiers["type-a"].predict_proba = \
        Mock(return_value=[
    component.classifiers["type-a"].predict_proba = Mock(
        return_value=[
            [0.20, 0.80],
        ])
    component.classifiers["type-b"].predict_proba = \
        Mock(return_value=[
        ]
    )
    component.classifiers["type-b"].predict_proba = Mock(
        return_value=[
            [0.50, 0.50],
            [0.05, 0.95],
        ])
        ]
    )

    # First mock instance
    instances[0].slacks = {
@@ -171,17 +193,21 @@ def test_x_y_fit_predict_evaluate():
        "c3": 0.00,
        "c4": 30.0,
    }
    instances[0].get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": None,
        "c2": "type-a",
        "c3": "type-a",
        "c4": "type-b",
    }[cid])
    instances[0].get_constraint_features = Mock(side_effect=lambda cid: {
        "c2": [1.0, 0.0],
        "c3": [0.5, 0.5],
        "c4": [1.0],
    }[cid])
    instances[0].get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )
    instances[0].get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": [1.0, 0.0],
            "c3": [0.5, 0.5],
            "c4": [1.0],
        }[cid]
    )

    # Second mock instance
    instances[1].slacks = {
@@ -190,26 +216,27 @@ def test_x_y_fit_predict_evaluate():
        "c4": 0.00,
        "c5": 0.00,
    }
    instances[1].get_constraint_category = Mock(side_effect=lambda cid: {
        "c1": None,
        "c3": "type-a",
        "c4": "type-b",
        "c5": "type-b",
    }[cid])
    instances[1].get_constraint_features = Mock(side_effect=lambda cid: {
        "c3": [0.3, 0.4],
        "c4": [0.7],
        "c5": [0.8],
    }[cid])
    instances[1].get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instances[1].get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c3": [0.3, 0.4],
            "c4": [0.7],
            "c5": [0.8],
        }[cid]
    )

    expected_x = {
        "type-a": [[1.0, 0.0], [0.5, 0.5], [0.3, 0.4]],
        "type-b": [[1.0], [0.7], [0.8]],
    }
    expected_y = {
        "type-a": [[0], [0], [1]],
        "type-b": [[1], [0], [0]]
    }
    expected_y = {"type-a": [[0], [0], [1]], "type-b": [[1], [0], [0]]}

    # Should build X and Y matrices correctly
    assert component.x(instances) == expected_x
@@ -217,13 +244,16 @@ def test_x_y_fit_predict_evaluate():

    # Should pass along X and Y matrices to classifiers
    component.fit(instances)
    component.classifiers["type-a"].fit.assert_called_with(expected_x["type-a"], expected_y["type-a"])
    component.classifiers["type-b"].fit.assert_called_with(expected_x["type-b"], expected_y["type-b"])
    component.classifiers["type-a"].fit.assert_called_with(
        expected_x["type-a"],
        expected_y["type-a"],
    )
    component.classifiers["type-b"].fit.assert_called_with(
        expected_x["type-b"],
        expected_y["type-b"],
    )

    assert component.predict(expected_x) == {
        "type-a": [[1]],
        "type-b": [[0], [1]]
    }
    assert component.predict(expected_x) == {"type-a": [[1]], "type-b": [[0], [1]]}

    ev = component.evaluate(instances[1])
    assert ev["True positive"] == 1

@@ -18,10 +18,10 @@ class InstanceIterator:
    def __init__(self, instances):
        self.instances = instances
        self.current = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.current >= len(self.instances):
            raise StopIteration
@@ -40,9 +40,9 @@ class InstanceIterator:

class Extractor(ABC):
    @abstractmethod
    def extract(self, instances,):
    def extract(self, instances):
        pass

    @staticmethod
    def split_variables(instance):
        assert hasattr(instance, "lp_solution")
@@ -57,13 +57,15 @@ class Extractor(ABC):
                result[category] += [(var_name, index)]
        return result


class VariableFeaturesExtractor(Extractor):
    def extract(self, instances):
        result = {}
        for instance in tqdm(InstanceIterator(instances),
                             desc="Extract (vars)",
                             disable=len(instances) < 5):
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (vars)",
            disable=len(instances) < 5,
        ):
            instance_features = instance.get_instance_features()
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
@@ -71,9 +73,9 @@ class VariableFeaturesExtractor(Extractor):
                    result[category] = []
                for (var_name, index) in var_index_pairs:
                    result[category] += [
                        instance_features.tolist() + \
                        instance.get_variable_features(var_name, index).tolist() + \
                        [instance.lp_solution[var_name][index]]
                        instance_features.tolist()
                        + instance.get_variable_features(var_name, index).tolist()
                        + [instance.lp_solution[var_name][index]]
                    ]
        for category in result:
            result[category] = np.array(result[category])
@@ -83,12 +85,14 @@ class VariableFeaturesExtractor(Extractor):
class SolutionExtractor(Extractor):
    def __init__(self, relaxation=False):
        self.relaxation = relaxation

    def extract(self, instances):
        result = {}
        for instance in tqdm(InstanceIterator(instances),
                             desc="Extract (solution)",
                             disable=len(instances) < 5):
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (solution)",
            disable=len(instances) < 5,
        ):
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
                if category not in result:
@@ -103,33 +107,40 @@ class SolutionExtractor(Extractor):
                else:
                    result[category] += [[1 - v, v]]
        for category in result:
            result[category] = np.array(result[category])
        return result


class InstanceFeaturesExtractor(Extractor):
    def extract(self, instances):
        return np.vstack([
            np.hstack([
                instance.get_instance_features(),
                instance.lp_value,
            ])
            for instance in InstanceIterator(instances)
        ])
        return np.vstack(
            [
                np.hstack(
                    [
                        instance.get_instance_features(),
                        instance.lp_value,
                    ]
                )
                for instance in InstanceIterator(instances)
            ]
        )


class ObjectiveValueExtractor(Extractor):
    def __init__(self, kind="lp"):
        assert kind in ["lower bound", "upper bound", "lp"]
        self.kind = kind

    def extract(self, instances):
        if self.kind == "lower bound":
            return np.array([[instance.lower_bound]
                             for instance in InstanceIterator(instances)])
            return np.array(
                [[instance.lower_bound] for instance in InstanceIterator(instances)]
            )
        if self.kind == "upper bound":
            return np.array([[instance.upper_bound]
                             for instance in InstanceIterator(instances)])
            return np.array(
                [[instance.upper_bound] for instance in InstanceIterator(instances)]
            )
        if self.kind == "lp":
            return np.array([[instance.lp_value]
                             for instance in InstanceIterator(instances)])
            return np.array(
                [[instance.lp_value] for instance in InstanceIterator(instances)]
            )
@@ -12,7 +12,7 @@ import numpy as np
class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the problem.

    In the knapsack problem, for example, this class could hold the number of items, their weights
    and costs, as well as the size of the knapsack. Objects implementing this class are able to
    convert themselves into a concrete optimization model, which can be optimized by a solver, or
@@ -29,17 +29,17 @@ class Instance(ABC):
    def get_instance_features(self):
        """
        Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.

        The array is used by LearningSolver to determine how similar two instances are. It may also
        be used to predict, in combination with variable-specific features, the values of binary
        decision variables in the problem.

        There is not necessarily a one-to-one correspondence between models and instance features:
        the features may encode only part of the data necessary to generate the complete model.
        Features may also be statistics computed from the original data. For example, in the
        knapsack problem, an implementation may decide to provide as instance features only
        the average weights, average prices, number of items and the size of the knapsack.

        The returned array MUST have the same length for all relevant instances of the problem. If
        two instances map into arrays of different lengths, they cannot be solved by the same
        LearningSolver object.
@@ -52,16 +52,16 @@ class Instance(ABC):
        """
        Returns a 1-dimensional array of (numerical) features describing a particular decision
        variable.

        The argument `var` is a pyomo.core.Var object, which represents a collection of decision
        variables. The argument `index` specifies which variable in the collection is the relevant
        one.

        In combination with instance features, variable features are used by LearningSolver to
        predict, among other things, the optimal value of each decision variable before the
        optimization takes place. In the knapsack problem, for example, an implementation could
        provide as variable features the weight and the price of a specific item.

        Like instance features, the arrays returned by this method MUST have the same length for
        all variables within the same category, for all relevant instances of the problem.

@@ -73,7 +73,7 @@ class Instance(ABC):
        """
        Returns the category (a string, an integer or any hashable type) for each decision
        variable.

        If two variables have the same category, LearningSolver will use the same internal ML
        model to predict the values of both variables. If the returned category is None, ML
        models will ignore the variable.
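Taken together, these three methods define the ML-facing interface of an instance. A minimal conforming subclass could be sketched as follows; the knapsack-style features are illustrative assumptions, not code from this commit:

class MyKnapsackInstance(Instance):
    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        ...  # build and return the concrete Pyomo model

    def get_instance_features(self):
        # Must have the same length for every instance of the problem.
        return np.array([self.capacity, np.average(self.weights)])

    def get_variable_features(self, var, index):
        # Must have the same length within each variable category.
        return np.array([self.weights[index], self.prices[index]])

    def get_variable_category(self, var, index):
        # Variables sharing a category share one ML model; None opts out.
        return "default"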
@@ -100,18 +100,18 @@ class Instance(ABC):
    def find_violated_lazy_constraints(self, model):
        """
        Returns lazy constraint violations found for the current solution.

        After solving a model, LearningSolver will ask the instance to identify which lazy
        constraints are violated by the current solution. For each identified violation,
        LearningSolver will then call build_lazy_constraint, add the generated Pyomo
        constraint to the model, then resolve the problem. The process repeats until no further
        lazy constraint violations are found.

        Each "violation" is simply a string, a tuple or any other hashable type which allows the
        instance to identify unambiguously which lazy constraint should be generated. In the
        Traveling Salesman Problem, for example, a subtour violation could be a frozen set
        containing the cities in the subtour.

        For a concrete example, see TravelingSalesmanInstance.
        """
        return []
@@ -119,17 +119,17 @@ class Instance(ABC):
    def build_lazy_constraint(self, model, violation):
        """
        Returns a Pyomo constraint which fixes a given violation.

        This method is typically called immediately after find_violated_lazy_constraints. The violation object
        provided to this method is exactly the same object returned earlier by find_violated_lazy_constraints.
        After some training, LearningSolver may decide to proactively build some lazy constraints
        at the beginning of the optimization process, before a solution is even available. In this
        case, build_lazy_constraint will be called without a corresponding call to
        find_violated_lazy_constraints.

        The implementation should not directly add the constraint to the model. The constraint
        will be added by LearningSolver after the method returns.

        For a concrete example, see TravelingSalesmanInstance.
        """
        pass
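The protocol described by these two docstrings amounts to a cut loop. A simplified sketch of how a solver would drive them (illustration only, not the actual LearningSolver implementation):

while True:
    internal_solver.solve(model)
    violations = instance.find_violated_lazy_constraints(model)
    if len(violations) == 0:
        break
    for violation in violations:
        cut = instance.build_lazy_constraint(model, violation)
        internal_solver.add_constraint(cut)  # added by the solver, not the instance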
@@ -141,11 +141,11 @@ class Instance(ABC):
        pass

    def load(self, filename):
        with gzip.GzipFile(filename, 'r') as f:
            data = json.loads(f.read().decode('utf-8'))
        with gzip.GzipFile(filename, "r") as f:
            data = json.loads(f.read().decode("utf-8"))
        self.__dict__ = data

    def dump(self, filename):
        data = json.dumps(self.__dict__, indent=2).encode('utf-8')
        with gzip.GzipFile(filename, 'w') as f:
        data = json.dumps(self.__dict__, indent=2).encode("utf-8")
        with gzip.GzipFile(filename, "w") as f:
            f.write(data)
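Because both helpers persist the instance's __dict__ as gzipped JSON, round-tripping is symmetric. A usage sketch (file name illustrative):

instance.dump("instance-001.json.gz")  # write all attributes as gzipped JSON
instance.load("instance-001.json.gz")  # restore the attributes in place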

@@ -7,7 +7,8 @@ import logging
import time
import sys

class TimeFormatter():

class TimeFormatter:
    def __init__(self, start_time, log_colors):
        self.start_time = start_time
        self.log_colors = log_colors
@@ -19,21 +20,23 @@ class TimeFormatter():
            color = self.log_colors["yellow"]
        else:
            color = self.log_colors["green"]
        return "%s[%12.3f]%s %s" % (color,
                                    record.created - self.start_time,
                                    self.log_colors["reset"],
                                    record.getMessage())
        return "%s[%12.3f]%s %s" % (
            color,
            record.created - self.start_time,
            self.log_colors["reset"],
            record.getMessage(),
        )

def setup_logger(start_time=None,
                 force_color=False):

def setup_logger(start_time=None, force_color=False):
    if start_time is None:
        start_time = time.time()
    if sys.stdout.isatty() or force_color:
        log_colors = {
            "green": '\033[92m',
            "yellow": '\033[93m',
            "red": '\033[91m',
            "reset": '\033[0m',
            "green": "\033[92m",
            "yellow": "\033[93m",
            "red": "\033[91m",
            "reset": "\033[0m",
        }
    else:
        log_colors = {
@@ -41,7 +44,7 @@ def setup_logger(start_time=None,
            "yellow": "",
            "red": "",
            "reset": "",
        }
    handler = logging.StreamHandler()
    handler.setFormatter(TimeFormatter(start_time, log_colors))
    logging.getLogger().addHandler(handler)
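A usage sketch: call the helper once at startup, and subsequent log records are prefixed with the elapsed wall-clock time, colorized by severity when writing to a terminal:

setup_logger()
logging.warning("starting benchmark")
# prints something like: [       0.004] starting benchmark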

@@ -17,44 +17,45 @@ class ChallengeA:
    - K = 500, u ~ U(0., 1.)
    - alpha = 0.25
    """

    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50):
    def __init__(
        self,
        seed=42,
        n_training_instances=500,
        n_test_instances=50,
    ):

        np.random.seed(seed)
        self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
                                          m=randint(low=10, high=11),
                                          w=uniform(loc=0.0, scale=1000.0),
                                          K=uniform(loc=500.0, scale=0.0),
                                          u=uniform(loc=0.0, scale=1.0),
                                          alpha=uniform(loc=0.25, scale=0.0),
                                          fix_w=True,
                                          w_jitter=uniform(loc=0.95, scale=0.1),
                                          )
        self.gen = MultiKnapsackGenerator(
            n=randint(low=250, high=251),
            m=randint(low=10, high=11),
            w=uniform(loc=0.0, scale=1000.0),
            K=uniform(loc=500.0, scale=0.0),
            u=uniform(loc=0.0, scale=1.0),
            alpha=uniform(loc=0.25, scale=0.0),
            fix_w=True,
            w_jitter=uniform(loc=0.95, scale=0.1),
        )
        np.random.seed(seed + 1)
        self.training_instances = self.gen.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.gen.generate(n_test_instances)


class MultiKnapsackInstance(Instance):
    """Representation of the Multidimensional 0-1 Knapsack Problem.

    Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
    sum(prices[i] for i in S). If selected, each item i occupies weights[i,j] units of space in
    each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].

    This implementation assigns a different category for each decision variable, and therefore
    trains one ML model per variable. It is only suitable when training and test instances have
    the same size and items don't shuffle around.
    """

    def __init__(self,
                 prices,
                 capacities,
                 weights):
    def __init__(self, prices, capacities, weights):
        assert isinstance(prices, np.ndarray)
        assert isinstance(capacities, np.ndarray)
        assert isinstance(weights, np.ndarray)
@@ -65,83 +66,92 @@ class MultiKnapsackInstance(Instance):
        self.prices = prices
        self.capacities = capacities
        self.weights = weights

    def to_model(self):
        model = pe.ConcreteModel()
        model.x = pe.Var(range(self.n), domain=pe.Binary)
        model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
                                                        for j in range(self.n)),
                                 sense=pe.maximize)
        model.OBJ = pe.Objective(
            rule=lambda model: sum(model.x[j] * self.prices[j] for j in range(self.n)),
            sense=pe.maximize,
        )
        model.eq_capacity = pe.ConstraintList()
        for i in range(self.m):
            model.eq_capacity.add(sum(model.x[j] * self.weights[i,j]
                                      for j in range(self.n)) <= self.capacities[i])
            model.eq_capacity.add(
                sum(model.x[j] * self.weights[i, j] for j in range(self.n))
                <= self.capacities[i]
            )
        return model

    def get_instance_features(self):
        return np.hstack([
            np.mean(self.prices),
            self.capacities,
        ])
        return np.hstack(
            [
                np.mean(self.prices),
                self.capacities,
            ]
        )

    def get_variable_features(self, var, index):
        return np.hstack([
            self.prices[index],
            self.weights[:, index],
        ])
        return np.hstack(
            [
                self.prices[index],
                self.weights[:, index],
            ]
        )

    # def get_variable_category(self, var, index):
    #     return index


class MultiKnapsackGenerator:
    def __init__(self,
                 n=randint(low=100, high=101),
                 m=randint(low=30, high=31),
                 w=randint(low=0, high=1000),
                 K=randint(low=500, high=500),
                 u=uniform(loc=0.0, scale=1.0),
                 alpha=uniform(loc=0.25, scale=0.0),
                 fix_w=False,
                 w_jitter=uniform(loc=1.0, scale=0.0),
                 round=True,
                 ):
    def __init__(
        self,
        n=randint(low=100, high=101),
        m=randint(low=30, high=31),
        w=randint(low=0, high=1000),
        K=randint(low=500, high=500),
        u=uniform(loc=0.0, scale=1.0),
        alpha=uniform(loc=0.25, scale=0.0),
        fix_w=False,
        w_jitter=uniform(loc=1.0, scale=0.0),
        round=True,
    ):
        """Initialize the problem generator.

        Instances have a random number of items (or variables) and a random number of knapsacks
        (or constraints), as specified by the provided probability distributions `n` and `m`,
        respectively. The weight of each item `i` on knapsack `j` is sampled independently from
        the provided distribution `w`. The capacity of knapsack `j` is set to:

            alpha_j * sum(w[i,j] for i in range(n)),

        where `alpha_j`, the tightness ratio, is sampled from the provided probability
        distribution `alpha`. To make the instances more challenging, the costs of the items
        are linearly correlated to their average weights. More specifically, the price of each
        item `i` is set to:

            sum(w[i,j]/m for j in range(m)) + K * u_i,

        where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
        from the provided probability distributions. Note that `K` is only sampled once for the
        entire instance.

        If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
        also implies that n and m are kept fixed. Although the prices and capacities are derived
        from w[i,j], as long as u and K are not constants, the generated instances will still not
        be completely identical.

        If a probability distribution w_jitter is provided, then item weights will be set to
        w[i,j] * gamma[i,j] where gamma[i,j] is sampled from w_jitter. When combined with
        fix_w=True, this argument may be used to generate instances where the weight of each item
        is roughly the same, but not exactly identical, across all instances. The prices of the
        items and the capacities of the knapsacks will be calculated as above, but using these
        perturbed weights instead.

        By default, all generated prices, weights and capacities are rounded to the nearest
        integer. If `round=False` is provided, this rounding will be disabled.

        Parameters
        ----------
        n: rv_discrete
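The two formulas in the docstring are exactly the `p` (prices) and `b` (capacities) arrays computed in `generate` further below. A small worked sketch with made-up numbers:

w = np.array([[10.0, 20.0],    # w[i, j]: weight of item j on knapsack i
              [30.0, 40.0]])
m, n = w.shape
K, u, alpha = 500.0, np.array([0.1, 0.2]), np.array([0.25, 0.25])
p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])  # [70.0, 130.0]
b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])      # [7.5, 17.5]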
@@ -168,11 +178,14 @@ class MultiKnapsackGenerator:
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(alpha, rv_frozen), "alpha should be a SciPy probability distribution"
        assert isinstance(
            alpha, rv_frozen
        ), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(w_jitter, rv_frozen), \
            "w_jitter should be a SciPy probability distribution"
        assert isinstance(
            w_jitter, rv_frozen
        ), "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
@@ -181,7 +194,7 @@ class MultiKnapsackGenerator:
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.round = round

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
@@ -194,7 +207,7 @@ class MultiKnapsackGenerator:
            self.fix_w = None
            self.fix_u = None
            self.fix_K = None

    def generate(self, n_samples):
        def _sample():
            if self.fix_w is not None:
@@ -211,20 +224,22 @@ class MultiKnapsackGenerator:
                K = self.K.rvs()
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array([w[:,j].sum() / m + K * u[j] for j in range(n)])
            b = np.array([w[i,:].sum() * alpha[i] for i in range(m)])
            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackInstance(p, b, w)

        return [_sample() for _ in range(n_samples)]


class KnapsackInstance(Instance):
    """
    Simpler (one-dimensional) Knapsack Problem, used for testing.
    """

    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
@@ -234,23 +249,29 @@ class KnapsackInstance(Instance):
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.prices[v] for v in items),
                                 sense=pe.maximize)
        model.eq_capacity = pe.Constraint(expr=sum(model.x[v] * self.weights[v]
                                                   for v in items) <= self.capacity)
        model.OBJ = pe.Objective(
            expr=sum(model.x[v] * self.prices[v] for v in items), sense=pe.maximize
        )
        model.eq_capacity = pe.Constraint(
            expr=sum(model.x[v] * self.weights[v] for v in items) <= self.capacity
        )
        return model

    def get_instance_features(self):
        return np.array([
            self.capacity,
            np.average(self.weights),
        ])
        return np.array(
            [
                self.capacity,
                np.average(self.weights),
            ]
        )

    def get_variable_features(self, var, index):
        return np.array([
            self.weights[index],
            self.prices[index],
        ])
        return np.array(
            [
                self.weights[index],
                self.prices[index],
            ]
        )


class GurobiKnapsackInstance(KnapsackInstance):
@@ -258,6 +279,7 @@ class GurobiKnapsackInstance(KnapsackInstance):
    Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
    instead of Pyomo, used for testing.
    """

    def __init__(self, weights, prices, capacity):
        super().__init__(weights, prices, capacity)

@@ -268,9 +290,11 @@ class GurobiKnapsackInstance(KnapsackInstance):
        model = gp.Model("Knapsack")
        n = len(self.weights)
        x = model.addVars(n, vtype=GRB.BINARY, name="x")
        model.addConstr(gp.quicksum(x[i] * self.weights[i]
                                    for i in range(n)) <= self.capacity,
                        "eq_capacity")
        model.setObjective(gp.quicksum(x[i] * self.prices[i]
                                       for i in range(n)), GRB.MAXIMIZE)
        model.addConstr(
            gp.quicksum(x[i] * self.weights[i] for i in range(n)) <= self.capacity,
            "eq_capacity",
        )
        model.setObjective(
            gp.quicksum(x[i] * self.prices[i] for i in range(n)), GRB.MAXIMIZE
        )
        return model

@@ -12,44 +12,49 @@ from scipy.stats.distributions import rv_frozen

class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):
    def __init__(
        self,
        seed=42,
        n_training_instances=500,
        n_test_instances=50,
    ):

        np.random.seed(seed)
        self.generator = MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
                                                     n=randint(low=200, high=201),
                                                     p=uniform(loc=0.05, scale=0.0),
                                                     fix_graph=True)
        self.generator = MaxWeightStableSetGenerator(
            w=uniform(loc=100.0, scale=50.0),
            n=randint(low=200, high=201),
            p=uniform(loc=0.05, scale=0.0),
            fix_graph=True,
        )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class MaxWeightStableSetGenerator:
    """Random instance generator for the Maximum-Weight Stable Set Problem.

    The generator has two modes of operation. When `fix_graph=True` is provided, one random
    Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled
    from user-provided probability distributions `n` and `p`. To generate each instance, the
    generator independently samples each $w_v$ from the user-provided probability distribution `w`.

    When `fix_graph=False`, a new random graph is generated for each instance; the remaining
    parameters are sampled in the same way.
    """

    def __init__(self,
                 w=uniform(loc=10.0, scale=1.0),
                 n=randint(low=250, high=251),
                 p=uniform(loc=0.05, scale=0.0),
                 fix_graph=True):
    def __init__(
        self,
        w=uniform(loc=10.0, scale=1.0),
        n=randint(low=250, high=251),
        p=uniform(loc=0.05, scale=0.0),
        fix_graph=True,
    ):
        """Initialize the problem generator.

        Parameters
        ----------
        w: rv_continuous
@@ -69,7 +74,7 @@ class MaxWeightStableSetGenerator:
        self.graph = None
        if fix_graph:
            self.graph = self._generate_graph()

    def generate(self, n_samples):
        def _sample():
            if self.graph is not None:
@@ -78,22 +83,23 @@ class MaxWeightStableSetGenerator:
                graph = self._generate_graph()
            weights = self.w.rvs(graph.number_of_nodes())
            return MaxWeightStableSetInstance(graph, weights)

        return [_sample() for _ in range(n_samples)]

    def _generate_graph(self):
        return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())


class MaxWeightStableSetInstance(Instance):
    """An instance of the Maximum-Weight Stable Set Problem.

    Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
    set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
    a subset of vertices, no two of which are adjacent.

    This is one of Karp's 21 NP-complete problems.
    """

    def __init__(self, graph, weights):
        self.graph = graph
        self.weights = weights
@@ -102,13 +108,14 @@ class MaxWeightStableSetInstance(Instance):
        nodes = list(self.graph.nodes)
        model = pe.ConcreteModel()
        model.x = pe.Var(nodes, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.weights[v] for v in nodes),
                                 sense=pe.maximize)
        model.OBJ = pe.Objective(
            expr=sum(model.x[v] * self.weights[v] for v in nodes), sense=pe.maximize
        )
        model.clique_eqs = pe.ConstraintList()
        for clique in nx.find_cliques(self.graph):
            model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
        return model

    def get_instance_features(self):
        return np.ones(0)

@@ -1,4 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

@@ -9,17 +9,18 @@ import numpy as np

def test_knapsack_generator():
    gen = MultiKnapsackGenerator(n=randint(low=100, high=101),
                                 m=randint(low=30, high=31),
                                 w=randint(low=0, high=1000),
                                 K=randint(low=500, high=501),
                                 u=uniform(loc=1.0, scale=1.0),
                                 alpha=uniform(loc=0.50, scale=0.0),
                                 )
    gen = MultiKnapsackGenerator(
        n=randint(low=100, high=101),
        m=randint(low=30, high=31),
        w=randint(low=0, high=1000),
        K=randint(low=500, high=501),
        u=uniform(loc=1.0, scale=1.0),
        alpha=uniform(loc=0.50, scale=0.0),
    )
    instances = gen.generate(100)
    w_sum = sum(instance.weights for instance in instances) / len(instances)
    p_sum = sum(instance.prices for instance in instances) / len(instances)
    b_sum = sum(instance.capacities for instance in instances) / len(instances)
    assert round(np.mean(w_sum), -1) == 500.
    assert round(np.mean(w_sum), -1) == 500.0
    # assert round(np.mean(p_sum), -1) == 1200.  # flaky
    assert round(np.mean(b_sum), -3) == 25000.
    assert round(np.mean(b_sum), -3) == 25000.0

@@ -11,36 +11,42 @@ from scipy.stats import uniform, randint

def test_stab():
    graph = nx.cycle_graph(5)
    weights = [1., 1., 1., 1., 1.]
    weights = [1.0, 1.0, 1.0, 1.0, 1.0]
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    solver.solve(instance)
    assert instance.lower_bound == 2.
    assert instance.lower_bound == 2.0


def test_stab_generator_fixed_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=10, high=11),
                                      p=uniform(loc=0.05, scale=0.),
                                      fix_graph=True)
    gen = MaxWeightStableSetGenerator(
        w=uniform(loc=50.0, scale=10.0),
        n=randint(low=10, high=11),
        p=uniform(loc=0.05, scale=0.0),
        fix_graph=True,
    )
    instances = gen.generate(1_000)
    weights = np.array([instance.weights for instance in instances])
    weights_avg_actual = np.round(np.average(weights, axis=0))
    weights_avg_expected = [55.0] * 10
    assert list(weights_avg_actual) == weights_avg_expected


def test_stab_generator_random_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=30, high=41),
                                      p=uniform(loc=0.5, scale=0.),
                                      fix_graph=False)
    gen = MaxWeightStableSetGenerator(
        w=uniform(loc=50.0, scale=10.0),
        n=randint(low=30, high=41),
        p=uniform(loc=0.5, scale=0.0),
        fix_graph=False,
    )
    instances = gen.generate(1_000)
    n_nodes = [instance.graph.number_of_nodes() for instance in instances]
    n_edges = [instance.graph.number_of_edges() for instance in instances]
    assert np.round(np.mean(n_nodes)) == 35.
    assert np.round(np.mean(n_edges), -1) == 300.
    assert np.round(np.mean(n_nodes)) == 35.0
    assert np.round(np.mean(n_edges), -1) == 300.0

@@ -11,11 +11,13 @@ from scipy.stats import uniform, randint


def test_generator():
    instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                           y=uniform(loc=0.0, scale=1000.0),
                                           n=randint(low=100, high=101),
                                           gamma=uniform(loc=0.95, scale=0.1),
                                           fix_cities=True).generate(100)
    instances = TravelingSalesmanGenerator(
        x=uniform(loc=0.0, scale=1000.0),
        y=uniform(loc=0.0, scale=1000.0),
        n=randint(low=100, high=101),
        gamma=uniform(loc=0.95, scale=0.1),
        fix_cities=True,
    ).generate(100)
    assert len(instances) == 100
    assert instances[0].n_cities == 100
    assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
@@ -25,14 +27,16 @@ def test_generator():

def test_instance():
    n_cities = 4
    distances = np.array([
        [0., 1., 2., 1.],
        [1., 0., 1., 2.],
        [2., 1., 0., 1.],
        [1., 2., 1., 0.],
    ])
    distances = np.array(
        [
            [0.0, 1.0, 2.0, 1.0],
            [1.0, 0.0, 1.0, 2.0],
            [2.0, 1.0, 0.0, 1.0],
            [1.0, 2.0, 1.0, 0.0],
        ]
    )
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi']:
    for solver_name in ["gurobi"]:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
@@ -48,17 +52,19 @@ def test_instance():

def test_subtour():
    n_cities = 6
    cities = np.array([
        [0., 0.],
        [1., 0.],
        [2., 0.],
        [3., 0.],
        [0., 1.],
        [3., 1.],
    ])
    cities = np.array(
        [
            [0.0, 0.0],
            [1.0, 0.0],
            [2.0, 0.0],
            [3.0, 0.0],
            [0.0, 1.0],
            [3.0, 1.0],
        ]
    )
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi']:
    for solver_name in ["gurobi"]:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        assert hasattr(instance, "found_violated_lazy_constraints")

@@ -13,41 +13,44 @@ import random

class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):
    def __init__(
        self,
        seed=42,
        n_training_instances=500,
        n_test_instances=50,
    ):

        np.random.seed(seed)
        self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                                    y=uniform(loc=0.0, scale=1000.0),
                                                    n=randint(low=350, high=351),
                                                    gamma=uniform(loc=0.95, scale=0.1),
                                                    fix_cities=True,
                                                    round=True,
                                                    )
        self.generator = TravelingSalesmanGenerator(
            x=uniform(loc=0.0, scale=1000.0),
            y=uniform(loc=0.0, scale=1000.0),
            n=randint(low=350, high=351),
            gamma=uniform(loc=0.95, scale=0.1),
            fix_cities=True,
            round=True,
        )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class TravelingSalesmanGenerator:
    """Random generator for the Traveling Salesman Problem."""

    def __init__(self,
                 x=uniform(loc=0.0, scale=1000.0),
                 y=uniform(loc=0.0, scale=1000.0),
                 n=randint(low=100, high=101),
                 gamma=uniform(loc=1.0, scale=0.0),
                 fix_cities=True,
                 round=True,
                 ):
    def __init__(
        self,
        x=uniform(loc=0.0, scale=1000.0),
        y=uniform(loc=0.0, scale=1000.0),
        n=randint(low=100, high=101),
        gamma=uniform(loc=1.0, scale=0.0),
        fix_cities=True,
        round=True,
    ):
        """Initializes the problem generator.

        Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n, x_i and y_i are
        sampled independently from the provided probability distributions `n`, `x` and `y`. For each
        (unordered) pair of cities (i,j), the distance d[i,j] between them is set to:
@@ -58,7 +61,7 @@ class TravelingSalesmanGenerator:

        If fix_cities=True, the list of cities is kept the same for all generated instances. The
        gamma values, and therefore also the distances, are still different.

        By default, all distances d[i,j] are rounded to the nearest integer. If `round=False`
        is provided, this rounding will be disabled.
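A usage sketch mirroring the constructor defaults visible below; here gamma ~ U(0.95, 1.05) perturbs each pairwise distance by up to roughly five percent:

from scipy.stats import randint, uniform

gen = TravelingSalesmanGenerator(
    x=uniform(loc=0.0, scale=1000.0),
    y=uniform(loc=0.0, scale=1000.0),
    n=randint(low=100, high=101),
    gamma=uniform(loc=0.95, scale=0.1),
    fix_cities=True,
)
instances = gen.generate(50)  # 50 instances over the same 100 cities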

@@ -79,19 +82,22 @@ class TravelingSalesmanGenerator:
        assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
        assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution"
        assert isinstance(
            gamma,
            rv_frozen,
        ), "gamma should be a SciPy probability distribution"
        self.x = x
        self.y = y
        self.n = n
        self.gamma = gamma
        self.round = round

        if fix_cities:
            self.fixed_n, self.fixed_cities = self._generate_cities()
        else:
            self.fixed_n = None
            self.fixed_cities = None

    def generate(self, n_samples):
        def _sample():
            if self.fixed_cities is not None:
@@ -103,54 +109,62 @@ class TravelingSalesmanGenerator:
            if self.round:
                distances = distances.round()
            return TravelingSalesmanInstance(n, distances)

        return [_sample() for _ in range(n_samples)]

    def _generate_cities(self):
        n = self.n.rvs()
        cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
        return n, cities


class TravelingSalesmanInstance(Instance):
    """An instance of the Traveling Salesman Problem.

    Given a list of cities and the distance between each pair of cities, the problem asks for the
    shortest route starting at the first city, visiting each other city exactly once, then
    returning to the first city. This problem is a generalization of the Hamiltonian path problem,
    one of Karp's 21 NP-complete problems.
    """

    def __init__(self, n_cities, distances):
        assert isinstance(distances, np.ndarray)
        assert distances.shape == (n_cities, n_cities)
        self.n_cities = n_cities
        self.distances = distances

    def to_model(self):
        model = pe.ConcreteModel()
        model.edges = edges = [(i,j)
                               for i in range(self.n_cities)
                               for j in range(i+1, self.n_cities)]
        model.edges = edges = [
            (i, j) for i in range(self.n_cities) for j in range(i + 1, self.n_cities)
        ]
        model.x = pe.Var(edges, domain=pe.Binary)
        model.obj = pe.Objective(expr=sum(model.x[i,j] * self.distances[i,j]
                                          for (i,j) in edges),
                                 sense=pe.minimize)
        model.obj = pe.Objective(
            expr=sum(model.x[i, j] * self.distances[i, j] for (i, j) in edges),
            sense=pe.minimize,
        )
        model.eq_degree = pe.ConstraintList()
        model.eq_subtour = pe.ConstraintList()
        for i in range(self.n_cities):
            model.eq_degree.add(sum(model.x[min(i,j), max(i,j)]
                                    for j in range(self.n_cities) if i != j) == 2)
            model.eq_degree.add(
                sum(
                    model.x[min(i, j), max(i, j)]
                    for j in range(self.n_cities)
                    if i != j
                )
                == 2
            )
        return model

    def get_instance_features(self):
        return np.array([1])

    def get_variable_features(self, var_name, index):
        return np.array([1])

    def get_variable_category(self, var_name, index):
        return index

    def find_violated_lazy_constraints(self, model):
        selected_edges = [e for e in model.edges if model.x[e].value > 0.5]
        graph = nx.Graph()
@@ -161,15 +175,18 @@ class TravelingSalesmanInstance(Instance):
            if len(c) < self.n_cities:
                violations += [c]
        return violations

    def build_lazy_constraint(self, model, component):
        cut_edges = [e for e in model.edges
                     if (e[0] in component and e[1] not in component) or
                        (e[0] not in component and e[1] in component)]
        cut_edges = [
            e
            for e in model.edges
            if (e[0] in component and e[1] not in component)
            or (e[0] not in component and e[1] in component)
        ]
        return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)

    def find_violated_user_cuts(self, model):
        return self.find_violated_lazy_constraints(model)

    def build_user_cut(self, model, violation):
        return self.build_lazy_constraint(model, violation)

@@ -13,10 +13,11 @@ logger = logging.getLogger(__name__)

class GurobiSolver(InternalSolver):
    def __init__(self,
                 params=None,
                 lazy_cb_frequency=1,
                 ):
    def __init__(
        self,
        params=None,
        lazy_cb_frequency=1,
    ):
        """
        An InternalSolver backed by Gurobi's Python API (without Pyomo).

@@ -33,6 +34,7 @@ class GurobiSolver(InternalSolver):
        if params is None:
            params = {}
        from gurobipy import GRB

        self.GRB = GRB
        self.instance = None
        self.model = None
@@ -44,8 +46,7 @@ class GurobiSolver(InternalSolver):
        if lazy_cb_frequency == 1:
            self.lazy_cb_where = [self.GRB.Callback.MIPSOL]
        else:
            self.lazy_cb_where = [self.GRB.Callback.MIPSOL,
                                  self.GRB.Callback.MIPNODE]
            self.lazy_cb_where = [self.GRB.Callback.MIPSOL, self.GRB.Callback.MIPNODE]

    def set_instance(self, instance, model=None):
        self._raise_if_callback()
@@ -70,14 +71,15 @@ class GurobiSolver(InternalSolver):
                idx = [0]
            else:
                name = m.group(1)
                idx = tuple(int(k) if k.isdecimal() else k
                            for k in m.group(2).split(","))
                idx = tuple(
                    int(k) if k.isdecimal() else k for k in m.group(2).split(",")
                )
                if len(idx) == 1:
                    idx = idx[0]
            if name not in self._all_vars:
                self._all_vars[name] = {}
            self._all_vars[name][idx] = var
            if var.vtype != 'C':
            if var.vtype != "C":
                if name not in self._bin_vars:
                    self._bin_vars[name] = {}
                self._bin_vars[name][idx] = var
@@ -103,15 +105,9 @@ class GurobiSolver(InternalSolver):
            for (idx, var) in vardict.items():
                var.vtype = self.GRB.BINARY
        log = streams[0].getvalue()
        return {
            "Optimal value": self.model.objVal,
            "Log": log
        }
        return {"Optimal value": self.model.objVal, "Log": log}

    def solve(self,
              tee=False,
              iteration_cb=None,
              lazy_cb=None):
    def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
        self._raise_if_callback()

        def cb_wrapper(cb_model, cb_where):
@@ -133,7 +129,7 @@ class GurobiSolver(InternalSolver):
        if tee:
            streams += [sys.stdout]
        if iteration_cb is None:
            iteration_cb = lambda : False
            iteration_cb = lambda: False
        while True:
            logger.debug("Solving MIP...")
            with RedirectOutput(streams):
@@ -187,7 +183,9 @@ class GurobiSolver(InternalSolver):
        elif self.cb_where is None:
            return var.x
        else:
            raise Exception("get_value cannot be called from cb_where=%s" % self.cb_where)
            raise Exception(
                "get_value cannot be called from cb_where=%s" % self.cb_where
            )

    def get_variables(self):
        self._raise_if_callback()
@@ -220,8 +218,10 @@ class GurobiSolver(InternalSolver):
            if value is not None:
                count_fixed += 1
                self._all_vars[varname][idx].start = value
        logger.info("Setting start values for %d variables (out of %d)" %
                    (count_fixed, count_total))
        logger.info(
            "Setting start values for %d variables (out of %d)"
            % (count_fixed, count_total)
        )

    def clear_warm_start(self):
        self._raise_if_callback()
@@ -248,10 +248,7 @@ class GurobiSolver(InternalSolver):
    def extract_constraint(self, cid):
        self._raise_if_callback()
        constr = self.model.getConstrByName(cid)
        cobj = (self.model.getRow(constr),
                constr.sense,
                constr.RHS,
                constr.ConstrName)
        cobj = (self.model.getRow(constr), constr.sense, constr.RHS, constr.ConstrName)
        self.model.remove(constr)
        return cobj

@@ -316,7 +313,7 @@ class GurobiSolver(InternalSolver):
            value = matches[0]
        return value

    def __getstate__(self):
        return {
            "params": self.params,
            "lazy_cb_where": self.lazy_cb_where,
@@ -324,6 +321,7 @@ class GurobiSolver(InternalSolver):

    def __setstate__(self, state):
        from gurobipy import GRB

        self.params = state["params"]
        self.lazy_cb_where = state["lazy_cb_where"]
        self.GRB = GRB
@@ -331,4 +329,4 @@ class GurobiSolver(InternalSolver):
        self.model = None
        self._all_vars = None
        self._bin_vars = None
        self.cb_where = None

@@ -222,4 +222,3 @@ class InternalSolver(ABC):
        for idx in indices:
            solution[var][idx] = 0.0
        return solution

@@ -12,10 +12,12 @@ from copy import deepcopy
from typing import Optional, List
from p_tqdm import p_map

from .. import (ObjectiveValueComponent,
                PrimalSolutionComponent,
                DynamicLazyConstraintsComponent,
                UserCutsComponent)
from .. import (
    ObjectiveValueComponent,
    PrimalSolutionComponent,
    DynamicLazyConstraintsComponent,
    UserCutsComponent,
)
from .pyomo.cplex import CplexPyomoSolver
from .pyomo.gurobi import GurobiPyomoSolver

@@ -43,16 +45,18 @@ def _parallel_solve(idx):

class LearningSolver:
    def __init__(self,
                 components=None,
                 gap_tolerance=1e-4,
                 mode="exact",
                 solver="gurobi",
                 threads=None,
                 time_limit=None,
                 node_limit=None,
                 solve_lp_first=True,
                 use_lazy_cb=False):
    def __init__(
        self,
        components=None,
        gap_tolerance=1e-4,
        mode="exact",
        solver="gurobi",
        threads=None,
        time_limit=None,
        node_limit=None,
        solve_lp_first=True,
        use_lazy_cb=False,
    ):
        """
        Mixed-Integer Linear Programming (MIP) solver that extracts information
        from previous runs and uses Machine Learning methods to accelerate the
@@ -142,28 +146,30 @@ class LearningSolver:
            solver.set_node_limit(self.node_limit)
        return solver

    def solve(self,
              instance,
              model=None,
              output="",
              tee=False):
    def solve(
        self,
        instance,
        model=None,
        output="",
        tee=False,
    ):
        """
        Solves the given instance. If trained machine-learning models are
        available, they will be used to accelerate the solution process.

        The argument `instance` may be either an Instance object or a
        filename pointing to a pickled Instance object.

        This method modifies the instance object. Specifically, the following
        properties are set:

        - instance.lp_solution
        - instance.lp_value
        - instance.lower_bound
        - instance.upper_bound
        - instance.solution
        - instance.solver_log

        Additional solver components may set additional properties. Please
        see their documentation for more details. If a filename is provided,
        then the file is modified in-place. That is, the original file is
@@ -197,7 +203,7 @@ class LearningSolver:
        "Predicted UB". See the documentation of each component for more
        details.
        """

        filename = None
        fileformat = None
        if isinstance(instance, str):
@@ -211,7 +217,7 @@ class LearningSolver:
            fileformat = "pickle"
            with open(filename, "rb") as file:
                instance = pickle.load(file)

        if model is None:
            model = instance.to_model()

@@ -248,9 +254,11 @@ class LearningSolver:
            lazy_cb = lazy_cb_wrapper

        logger.info("Solving MILP...")
        results = self.internal_solver.solve(tee=tee,
                                             iteration_cb=iteration_cb,
                                             lazy_cb=lazy_cb)
        results = self.internal_solver.solve(
            tee=tee,
            iteration_cb=iteration_cb,
            lazy_cb=lazy_cb,
        )
        results["LP value"] = instance.lp_value

        # Read MIP solution and bounds
@@ -262,7 +270,7 @@ class LearningSolver:
        logger.debug("Calling after_solve callbacks...")
        for component in self.components.values():
            component.after_solve(self, instance, model, results)

        if filename is not None and output is not None:
            output_filename = output
            if len(output) == 0:
@@ -280,36 +288,38 @@ class LearningSolver:
    def parallel_solve(self, instances, n_jobs=4, label="Solve", output=[]):
        """
        Solves multiple instances in parallel.

        This method is equivalent to calling `solve` for each item on the list,
        but it processes multiple instances at the same time. Like `solve`, this
        method modifies each instance in place. Also like `solve`, a list of
        filenames may be provided.

        Parameters
        ----------
        instances: [miplearn.Instance] or [str]
            The instances to be solved
        n_jobs: int
            Number of instances to solve in parallel at a time.

        Returns
        -------
        Returns a list of dictionaries, with one entry for each provided instance.
        This dictionary is the same you would obtain by calling:

            [solver.solve(p) for p in instances]

        """
|
||||
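A sketch of the equivalence stated in the docstring, assuming a configured solver and a list of instances:

    # Sketch: functionally equivalent to [solver.solve(p) for p in instances],
    # but solving up to n_jobs instances at a time.
    stats = solver.parallel_solve(instances, n_jobs=4, label="Solve")
    assert len(stats) == len(instances)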
        self.internal_solver = None
        self._silence_miplearn_logger()
        SOLVER[0] = self
        OUTPUTS[0] = output
        INSTANCES[0] = instances
        results = p_map(_parallel_solve,
                        list(range(len(instances))),
                        num_cpus=n_jobs,
                        desc=label)
        results = p_map(
            _parallel_solve,
            list(range(len(instances))),
            num_cpus=n_jobs,
            desc=label,
        )
        stats = []
        for (idx, (s, instance)) in enumerate(results):
            stats.append(s)
@@ -330,12 +340,12 @@ class LearningSolver:

    def _silence_miplearn_logger(self):
        miplearn_logger = logging.getLogger("miplearn")
        self.prev_log_level = miplearn_logger.getEffectiveLevel()
        miplearn_logger.setLevel(logging.WARNING)
        miplearn_logger.setLevel(logging.WARNING)

    def _restore_miplearn_logger(self):
        miplearn_logger = logging.getLogger("miplearn")
        miplearn_logger.setLevel(self.prev_log_level)
        miplearn_logger.setLevel(self.prev_log_level)

    def __getstate__(self):
        self.internal_solver = None
        return self.__dict__
@@ -81,8 +81,10 @@ class BasePyomoSolver(InternalSolver):
                    count_fixed += 1
        if count_fixed > 0:
            self._is_warm_start_available = True
        logger.info("Setting start values for %d variables (out of %d)" %
                    (count_fixed, count_total))
        logger.info(
            "Setting start values for %d variables (out of %d)"
            % (count_fixed, count_total)
        )

    def clear_warm_start(self):
        for var in self._all_vars:
@@ -134,17 +136,19 @@ class BasePyomoSolver(InternalSolver):
                count_fixed += 1
                var[index].fix(solution[varname][index])
                self._pyomo_solver.update_var(var[index])
        logger.info("Fixing values for %d variables (out of %d)" %
                    (count_fixed, count_total))
        logger.info(
            "Fixing values for %d variables (out of %d)"
            % (
                count_fixed,
                count_total,
            )
        )

    def add_constraint(self, constraint):
        self._pyomo_solver.add_constraint(constraint)
        self._update_constrs()

    def solve(self,
              tee=False,
              iteration_cb=None,
              lazy_cb=None):
    def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
        if lazy_cb is not None:
            raise Exception("lazy callback not supported")
        total_wallclock_time = 0
@@ -158,8 +162,10 @@ class BasePyomoSolver(InternalSolver):
        while True:
            logger.debug("Solving MIP...")
            with RedirectOutput(streams):
                results = self._pyomo_solver.solve(tee=True,
                                                   warmstart=self._is_warm_start_available)
                results = self._pyomo_solver.solve(
                    tee=True,
                    warmstart=self._is_warm_start_available,
                )
            total_wallclock_time += results["Solver"][0]["Wallclock time"]
            should_repeat = iteration_cb()
            if not should_repeat:
@@ -192,9 +198,7 @@ class BasePyomoSolver(InternalSolver):
        return value

    def _extract_node_count(self, log):
        return int(self.__extract(log,
                                  self._get_node_count_regexp(),
                                  default=1))
        return int(self.__extract(log, self._get_node_count_regexp(), default=1))

    def set_threads(self, threads):
        key = self._get_threads_option_name()
@@ -249,4 +253,4 @@ class BasePyomoSolver(InternalSolver):
        raise Exception("not implemented")

    def get_constraint_slacks(self):
        raise Exception("not implemented")
        raise Exception("not implemented")
@@ -20,7 +20,7 @@ class CplexPyomoSolver(BasePyomoSolver):
            {"mip_display": 5} to increase the log verbosity.
        """
        super().__init__()
        self._pyomo_solver = pe.SolverFactory('cplex_persistent')
        self._pyomo_solver = pe.SolverFactory("cplex_persistent")
        self._pyomo_solver.options["randomseed"] = randint(low=0, high=1000).rvs()
        self._pyomo_solver.options["mip_display"] = 4
        if options is not None:
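The `options` dictionary mentioned in the docstring is applied to the persistent Pyomo solver, as the `if options is not None:` branch above suggests; a hypothetical call:

    # Sketch: raise CPLEX log verbosity, per the docstring example above.
    solver = CplexPyomoSolver(options={"mip_display": 5})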
@@ -15,8 +15,7 @@ logger = logging.getLogger(__name__)


class GurobiPyomoSolver(BasePyomoSolver):
    def __init__(self,
                 options=None):
    def __init__(self, options=None):
        """
        Creates a new Gurobi solver, accessed through Pyomo.

@@ -27,7 +26,7 @@ class GurobiPyomoSolver(BasePyomoSolver):
            {"Threads": 4} to set the number of threads.
        """
        super().__init__()
        self._pyomo_solver = pe.SolverFactory('gurobi_persistent')
        self._pyomo_solver = pe.SolverFactory("gurobi_persistent")
        self._pyomo_solver.options["Seed"] = randint(low=0, high=1000).rvs()
        if options is not None:
            for (key, value) in options.items():
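The Gurobi wrapper accepts options the same way; a hypothetical call matching the docstring example:

    # Sketch: limit Gurobi to four threads via the options dictionary.
    solver = GurobiPyomoSolver(options={"Threads": 4})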
@@ -56,6 +55,7 @@ class GurobiPyomoSolver(BasePyomoSolver):

    def set_branching_priorities(self, priorities):
        from gurobipy import GRB

        for varname in priorities.keys():
            var = self._varname_to_var[varname]
            for (index, priority) in priorities[varname].items():
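Judging from the loop above, `set_branching_priorities` expects a nested mapping from variable name to index to priority; a hypothetical call:

    # Sketch only: give x[0] a higher branching priority than x[1].
    solver.set_branching_priorities({"x": {0: 10, 1: 1}})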
@@ -9,20 +9,22 @@ from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance

def _get_instance(solver):
    def _is_subclass_or_instance(solver, parentClass):
        return isinstance(solver, parentClass) or (isclass(solver) and issubclass(solver, parentClass))
        return isinstance(solver, parentClass) or (
            isclass(solver) and issubclass(solver, parentClass)
        )

    if _is_subclass_or_instance(solver, BasePyomoSolver):
        return KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )

    if _is_subclass_or_instance(solver, GurobiSolver):
        return GurobiKnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )

    assert False
@@ -16,6 +16,7 @@ logger = logging.getLogger(__name__)

def test_redirect_output():
    import sys

    original_stdout = sys.stdout
    io = StringIO()
    with RedirectOutput([io]):
@@ -31,36 +32,42 @@ def test_internal_solver_warm_starts():
        model = instance.to_model()
        solver = solver_class()
        solver.set_instance(instance, model)
        solver.set_warm_start({
            "x": {
                0: 1.0,
                1: 0.0,
                2: 0.0,
                3: 1.0,
        solver.set_warm_start(
            {
                "x": {
                    0: 1.0,
                    1: 0.0,
                    2: 0.0,
                    3: 1.0,
                }
            }
        })
        )
        stats = solver.solve(tee=True)
        assert stats["Warm start value"] == 725.0

        solver.set_warm_start({
            "x": {
                0: 1.0,
                1: 1.0,
                2: 1.0,
                3: 1.0,
        solver.set_warm_start(
            {
                "x": {
                    0: 1.0,
                    1: 1.0,
                    2: 1.0,
                    3: 1.0,
                }
            }
        })
        )
        stats = solver.solve(tee=True)
        assert stats["Warm start value"] is None

        solver.fix({
            "x": {
                0: 1.0,
                1: 0.0,
                2: 0.0,
                3: 1.0,
        solver.fix(
            {
                "x": {
                    0: 1.0,
                    1: 0.0,
                    2: 0.0,
                    3: 1.0,
                }
            }
        })
        )
        stats = solver.solve(tee=True)
        assert stats["Lower bound"] == 725.0
        assert stats["Upper bound"] == 725.0
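The test above contrasts the two APIs: `set_warm_start` only suggests a starting point (the reported `Warm start value` is None when the hint is unusable, as with the all-ones start, whose total weight 87 exceeds the knapsack capacity of 67), while `fix` pins the variables so the solve must return exactly that solution. Condensed:

    # Sketch: a warm start is a hint the solver may discard; fix() is binding.
    solver.set_warm_start({"x": {0: 1.0, 1: 0.0, 2: 0.0, 3: 1.0}})  # hint
    solver.fix({"x": {0: 1.0, 1: 0.0, 2: 0.0, 3: 1.0}})             # binding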
@@ -20,11 +20,13 @@ def test_learning_solver():
    for internal_solver in _get_internal_solvers():
        logger.info("Solver: %s" % internal_solver)
        instance = _get_instance(internal_solver)
        solver = LearningSolver(time_limit=300,
                                gap_tolerance=1e-3,
                                threads=1,
                                solver=internal_solver,
                                mode=mode)
        solver = LearningSolver(
            time_limit=300,
            gap_tolerance=1e-3,
            threads=1,
            solver=internal_solver,
            mode=mode,
        )

        solver.solve(instance)
        assert instance.solution["x"][0] == 1.0
@@ -74,37 +76,36 @@ def test_solve_fit_from_disk():
        filenames = []
        for k in range(3):
            instance = _get_instance(internal_solver)
            with tempfile.NamedTemporaryFile(suffix=".pkl",
                                             delete=False) as file:
            with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as file:
                filenames += [file.name]
                pickle.dump(instance, file)

        # Test: solve
        solver = LearningSolver(solver=internal_solver)
        solver.solve(filenames[0])
        with open(filenames[0], "rb") as file:
            instance = pickle.load(file)
            assert hasattr(instance, "solution")

        # Test: parallel_solve
        solver.parallel_solve(filenames)
        for filename in filenames:
            with open(filename, "rb") as file:
                instance = pickle.load(file)
                assert hasattr(instance, "solution")

        # Test: solve (with specified output)
        output = [f + ".out" for f in filenames]
        solver.solve(filenames[0], output=output[0])
        assert os.path.isfile(output[0])

        # Test: parallel_solve (with specified output)
        solver.parallel_solve(filenames, output=output)
        for filename in output:
            assert os.path.isfile(filename)

        # Delete temporary files
        for filename in filenames:
            os.remove(filename)
        for filename in output:
            os.remove(filename)
            os.remove(filename)
@@ -8,14 +8,14 @@ from miplearn.problems.knapsack import KnapsackInstance
def get_test_pyomo_instances():
    instances = [
        KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        ),
        KnapsackInstance(
            weights=[25., 30., 22., 18.],
            prices=[500., 365., 420., 150.],
            capacity=70.,
            weights=[25.0, 30.0, 22.0, 18.0],
            prices=[500.0, 365.0, 420.0, 150.0],
            capacity=70.0,
        ),
    ]
    models = [instance.to_model() for instance in instances]
@@ -11,8 +11,9 @@ from scipy.stats import randint

def test_benchmark():
    # Generate training and test instances
    train_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(5)
    test_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(3)
    generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
    train_instances = generator.generate(5)
    test_instances = generator.generate(3)

    # Training phase...
    training_solver = LearningSolver()
@@ -26,11 +27,11 @@ def test_benchmark():
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
    assert benchmark.raw_results().values.shape == (12,16)

    assert benchmark.raw_results().values.shape == (12, 16)

    benchmark.save_results("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")

    benchmark = BenchmarkRunner(test_solvers)
    benchmark.load_results("/tmp/benchmark.csv")
    assert benchmark.raw_results().values.shape == (12,16)
    assert benchmark.raw_results().values.shape == (12, 16)
@@ -3,25 +3,28 @@
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.problems.knapsack import KnapsackInstance
from miplearn import (LearningSolver,
                      SolutionExtractor,
                      InstanceFeaturesExtractor,
                      VariableFeaturesExtractor,
                      )
from miplearn import (
    LearningSolver,
    SolutionExtractor,
    InstanceFeaturesExtractor,
    VariableFeaturesExtractor,
)
import numpy as np
import pyomo.environ as pe


def _get_instances():
    instances = [
        KnapsackInstance(weights=[1., 2., 3.],
                         prices=[10., 20., 30.],
                         capacity=2.5,
                         ),
        KnapsackInstance(weights=[3., 4., 5.],
                         prices=[20., 30., 40.],
                         capacity=4.5,
                         ),
        KnapsackInstance(
            weights=[1.0, 2.0, 3.0],
            prices=[10.0, 20.0, 30.0],
            capacity=2.5,
        ),
        KnapsackInstance(
            weights=[3.0, 4.0, 5.0],
            prices=[20.0, 30.0, 40.0],
            capacity=4.5,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
@@ -38,25 +41,30 @@ def test_solution_extractor():
    assert isinstance(features["default"], np.ndarray)
    assert features["default"].shape == (6, 2)
    assert features["default"].ravel().tolist() == [
        1., 0.,
        0., 1.,
        1., 0.,
        1., 0.,
        0., 1.,
        1., 0.,
        1.0,
        0.0,
        0.0,
        1.0,
        1.0,
        0.0,
        1.0,
        0.0,
        0.0,
        1.0,
        1.0,
        0.0,
    ]


def test_instance_features_extractor():
    instances, models = _get_instances()
    features = InstanceFeaturesExtractor().extract(instances)
    assert features.shape == (2,3)

    assert features.shape == (2, 3)


def test_variable_features_extractor():
    instances, models = _get_instances()
    features = VariableFeaturesExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features
    assert features["default"].shape == (6,5)

    assert features["default"].shape == (6, 5)
3
pyproject.toml
Normal file
@@ -0,0 +1,3 @@
[tool.black]
py36 = true
include = '\.pyi?$'
@@ -12,3 +12,5 @@ python-markdown-math~=0.8
seaborn~=0.11
scikit-learn~=0.23
tqdm~=4.54
black==20.8b1
pre-commit~=2.9