Mirror of https://github.com/ANL-CEEESA/MIPLearn.git, synced 2025-12-06 01:18:52 -06:00
Reformat source code with Black; add pre-commit hooks and CI checks
.github/workflows/lint.yml (new file, +11, vendored)
@@ -0,0 +1,11 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - uses: psf/black@stable
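As configured, the workflow runs on every push and pull request. The psf/black action runs Black in its default check mode against the repository, so the job fails whenever a file would be reformatted, without ever modifying the code itself.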
.pre-commit-config.yaml (new file, +6)
@@ -0,0 +1,6 @@
+repos:
+  - repo: https://github.com/ambv/black
+    rev: stable
+    hooks:
+      - id: black
+        args: ["--check"]
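With this file in place, contributors can enable the same check locally by installing pre-commit (`pip install pre-commit`) and running `pre-commit install` once per clone; Black then vets staged Python files on every `git commit`, and `pre-commit run --all-files` checks the whole tree on demand. Note that `ambv/black` is the pre-rename address of Black's repository, which now redirects to `psf/black`.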
Makefile (+3)
@@ -34,6 +34,9 @@ install:
 uninstall:
 	$(PIP) uninstall miplearn
 
+reformat:
+	$(PYTHON) -m black miplearn
+
 test:
 	$(PYTEST) $(PYTEST_ARGS)
 
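Unlike the pre-commit hook and the CI job, which only report formatting violations, the new `make reformat` target rewrites the `miplearn` package in place, giving contributors a one-command fix for whatever the checks flag.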
@@ -2,10 +2,12 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
-from .extractors import (SolutionExtractor,
-                         InstanceFeaturesExtractor,
-                         ObjectiveValueExtractor,
-                         VariableFeaturesExtractor)
+from .extractors import (
+    SolutionExtractor,
+    InstanceFeaturesExtractor,
+    ObjectiveValueExtractor,
+    VariableFeaturesExtractor,
+)
 
 from .components.component import Component
 from .components.objective import ObjectiveValueComponent
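Every hunk from here on is the same mechanical transformation, applied by Black rather than by hand: continuation lines that were aligned with the opening parenthesis become a 4-space-indented list with one argument per line and a trailing comma, while anything that fits within Black's 88-character default line length is collapsed onto a single line. A minimal sketch of the pattern — the `solve` function below is invented purely for illustration and is not part of MIPLearn:

    def solve(instance, solver=None, solver_name=None, tee=False, n_jobs=1):
        # Stand-in body, only here to make the example self-contained.
        return instance

    # Before: continuation lines aligned with the opening parenthesis.
    result = solve("instance-0001", solver=object(), solver_name="baseline", tee=False, n_jobs=4)

    # After Black: the call exceeds 88 characters, so each argument moves to
    # its own line, indented four spaces, with a trailing comma before the
    # closing parenthesis.
    result = solve(
        "instance-0001",
        solver=object(),
        solver_name="baseline",
        tee=False,
        n_jobs=4,
    )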
@@ -19,43 +19,53 @@ class BenchmarkRunner:
             assert isinstance(solver, LearningSolver)
         self.solvers = solvers
         self.results = None
 
     def solve(self, instances, tee=False):
         for (solver_name, solver) in self.solvers.items():
             for i in tqdm(range(len((instances)))):
                 results = solver.solve(deepcopy(instances[i]), tee=tee)
-                self._push_result(results, solver=solver, solver_name=solver_name, instance=i)
+                self._push_result(
+                    results,
+                    solver=solver,
+                    solver_name=solver_name,
+                    instance=i,
+                )
 
-    def parallel_solve(self,
-                       instances,
-                       n_jobs=1,
-                       n_trials=1,
-                       index_offset=0,
-                      ):
+    def parallel_solve(
+        self,
+        instances,
+        n_jobs=1,
+        n_trials=1,
+        index_offset=0,
+    ):
         self._silence_miplearn_logger()
         trials = instances * n_trials
         for (solver_name, solver) in self.solvers.items():
-            results = solver.parallel_solve(trials,
-                                            n_jobs=n_jobs,
-                                            label="Solve (%s)" % solver_name,
-                                            output=None)
+            results = solver.parallel_solve(
+                trials,
+                n_jobs=n_jobs,
+                label="Solve (%s)" % solver_name,
+                output=None,
+            )
             for i in range(len(trials)):
                 idx = (i % len(instances)) + index_offset
-                self._push_result(results[i],
-                                  solver=solver,
-                                  solver_name=solver_name,
-                                  instance=idx)
+                self._push_result(
+                    results[i],
+                    solver=solver,
+                    solver_name=solver_name,
+                    instance=idx,
+                )
         self._restore_miplearn_logger()
 
     def raw_results(self):
         return self.results
 
     def save_results(self, filename):
         self.results.to_csv(filename)
 
     def load_results(self, filename):
         self.results = pd.read_csv(filename, index_col=0)
 
     def load_state(self, filename):
         for (solver_name, solver) in self.solvers.items():
             solver.load_state(filename)
@@ -63,62 +73,69 @@ class BenchmarkRunner:
     def fit(self, training_instances):
         for (solver_name, solver) in self.solvers.items():
             solver.fit(training_instances)
 
     def _push_result(self, result, solver, solver_name, instance):
         if self.results is None:
-            self.results = pd.DataFrame(columns=["Solver",
-                                                 "Instance",
-                                                 "Wallclock Time",
-                                                 "Lower Bound",
-                                                 "Upper Bound",
-                                                 "Gap",
-                                                 "Nodes",
-                                                 "Mode",
-                                                 "Sense",
-                                                 "Predicted LB",
-                                                 "Predicted UB",
-                                                ])
+            self.results = pd.DataFrame(
+                columns=[
+                    "Solver",
+                    "Instance",
+                    "Wallclock Time",
+                    "Lower Bound",
+                    "Upper Bound",
+                    "Gap",
+                    "Nodes",
+                    "Mode",
+                    "Sense",
+                    "Predicted LB",
+                    "Predicted UB",
+                ]
+            )
         lb = result["Lower bound"]
         ub = result["Upper bound"]
         gap = (ub - lb) / lb
         if "Predicted LB" not in result:
             result["Predicted LB"] = float("nan")
             result["Predicted UB"] = float("nan")
-        self.results = self.results.append({
-            "Solver": solver_name,
-            "Instance": instance,
-            "Wallclock Time": result["Wallclock time"],
-            "Lower Bound": lb,
-            "Upper Bound": ub,
-            "Gap": gap,
-            "Nodes": result["Nodes"],
-            "Mode": solver.mode,
-            "Sense": result["Sense"],
-            "Predicted LB": result["Predicted LB"],
-            "Predicted UB": result["Predicted UB"],
-        }, ignore_index=True)
+        self.results = self.results.append(
+            {
+                "Solver": solver_name,
+                "Instance": instance,
+                "Wallclock Time": result["Wallclock time"],
+                "Lower Bound": lb,
+                "Upper Bound": ub,
+                "Gap": gap,
+                "Nodes": result["Nodes"],
+                "Mode": solver.mode,
+                "Sense": result["Sense"],
+                "Predicted LB": result["Predicted LB"],
+                "Predicted UB": result["Predicted UB"],
+            },
+            ignore_index=True,
+        )
         groups = self.results.groupby("Instance")
         best_lower_bound = groups["Lower Bound"].transform("max")
         best_upper_bound = groups["Upper Bound"].transform("min")
         best_gap = groups["Gap"].transform("min")
         best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
         best_wallclock_time = groups["Wallclock Time"].transform("min")
-        self.results["Relative Lower Bound"] = \
-            self.results["Lower Bound"] / best_lower_bound
-        self.results["Relative Upper Bound"] = \
-            self.results["Upper Bound"] / best_upper_bound
-        self.results["Relative Wallclock Time"] = \
-            self.results["Wallclock Time"] / best_wallclock_time
-        self.results["Relative Gap"] = \
-            self.results["Gap"] / best_gap
-        self.results["Relative Nodes"] = \
-            self.results["Nodes"] / best_nodes
+        self.results["Relative Lower Bound"] = (
+            self.results["Lower Bound"] / best_lower_bound
+        )
+        self.results["Relative Upper Bound"] = (
+            self.results["Upper Bound"] / best_upper_bound
+        )
+        self.results["Relative Wallclock Time"] = (
+            self.results["Wallclock Time"] / best_wallclock_time
+        )
+        self.results["Relative Gap"] = self.results["Gap"] / best_gap
+        self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
 
     def save_chart(self, filename):
         import matplotlib.pyplot as plt
         import seaborn as sns
         from numpy import median
 
         sns.set_style("whitegrid")
         sns.set_palette("Blues_r")
         results = self.raw_results()
@@ -134,71 +151,76 @@ class BenchmarkRunner:
             obj_column = "Lower Bound"
             predicted_obj_column = "Predicted LB"
 
-        fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
-                                                 ncols=4,
-                                                 figsize=(12,4),
-                                                 gridspec_kw={'width_ratios': [2, 1, 1, 2]})
+        fig, (ax1, ax2, ax3, ax4) = plt.subplots(
+            nrows=1,
+            ncols=4,
+            figsize=(12, 4),
+            gridspec_kw={"width_ratios": [2, 1, 1, 2]},
+        )
 
         # Figure 1: Solver x Wallclock Time
-        sns.stripplot(x="Solver",
-                      y="Wallclock Time",
-                      data=results,
-                      ax=ax1,
-                      jitter=0.25,
-                      size=4.0,
-                     )
-        sns.barplot(x="Solver",
-                    y="Wallclock Time",
-                    data=results,
-                    ax=ax1,
-                    errwidth=0.,
-                    alpha=0.4,
-                    estimator=median,
-                   )
-        ax1.set(ylabel='Wallclock Time (s)')
+        sns.stripplot(
+            x="Solver",
+            y="Wallclock Time",
+            data=results,
+            ax=ax1,
+            jitter=0.25,
+            size=4.0,
+        )
+        sns.barplot(
+            x="Solver",
+            y="Wallclock Time",
+            data=results,
+            ax=ax1,
+            errwidth=0.0,
+            alpha=0.4,
+            estimator=median,
+        )
+        ax1.set(ylabel="Wallclock Time (s)")
 
         # Figure 2: Solver x Gap (%)
         ax2.set_ylim(-0.5, 5.5)
-        sns.stripplot(x="Solver",
-                      y="Gap (%)",
-                      jitter=0.25,
-                      data=results[results["Mode"] != "heuristic"],
-                      ax=ax2,
-                      size=4.0,
-                     )
+        sns.stripplot(
+            x="Solver",
+            y="Gap (%)",
+            jitter=0.25,
+            data=results[results["Mode"] != "heuristic"],
+            ax=ax2,
+            size=4.0,
+        )
 
         # Figure 3: Solver x Primal Value
-        ax3.set_ylim(0.95,1.05)
-        sns.stripplot(x="Solver",
-                      y=primal_column,
-                      jitter=0.25,
-                      data=results[results["Mode"] == "heuristic"],
-                      ax=ax3,
-                     )
+        ax3.set_ylim(0.95, 1.05)
+        sns.stripplot(
+            x="Solver",
+            y=primal_column,
+            jitter=0.25,
+            data=results[results["Mode"] == "heuristic"],
+            ax=ax3,
+        )
 
         # Figure 4: Predicted vs Actual Objective Value
-        sns.scatterplot(x=obj_column,
-                        y=predicted_obj_column,
-                        hue="Solver",
-                        data=results[results["Mode"] != "heuristic"],
-                        ax=ax4,
-                       )
+        sns.scatterplot(
+            x=obj_column,
+            y=predicted_obj_column,
+            hue="Solver",
+            data=results[results["Mode"] != "heuristic"],
+            ax=ax4,
+        )
         xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
-        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc")
+        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls="-", color="#cccccc")
         ax4.set_xlim(xlim)
         ax4.set_ylim(ylim)
         ax4.get_legend().remove()
 
         fig.tight_layout()
-        plt.savefig(filename, bbox_inches='tight', dpi=150)
+        plt.savefig(filename, bbox_inches="tight", dpi=150)
 
     def _silence_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         self.prev_log_level = miplearn_logger.getEffectiveLevel()
         miplearn_logger.setLevel(logging.WARNING)
 
     def _restore_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         miplearn_logger.setLevel(self.prev_log_level)
 
 
@@ -22,9 +22,11 @@ class AdaptiveClassifier(Classifier):
     based on its cross-validation score on a particular training data set.
     """
 
-    def __init__(self,
-                 candidates=None,
-                 evaluator=ClassifierEvaluator()):
+    def __init__(
+        self,
+        candidates=None,
+        evaluator=ClassifierEvaluator(),
+    ):
         """
         Initializes the meta-classifier.
         """
@@ -35,14 +37,13 @@ class AdaptiveClassifier(Classifier):
                 "min samples": 100,
             },
             "logistic": {
-                "classifier": make_pipeline(StandardScaler(),
-                                            LogisticRegression()),
+                "classifier": make_pipeline(StandardScaler(), LogisticRegression()),
                 "min samples": 30,
             },
             "counting": {
                 "classifier": CountingClassifier(),
                 "min samples": 0,
-            }
+            },
         }
         self.candidates = candidates
         self.evaluator = evaluator
@@ -21,8 +21,7 @@ class CountingClassifier(Classifier):
         self.mean = np.mean(y_train)
 
     def predict_proba(self, x_test):
-        return np.array([[1 - self.mean, self.mean]
-                         for _ in range(x_test.shape[0])])
+        return np.array([[1 - self.mean, self.mean] for _ in range(x_test.shape[0])])
 
     def __repr__(self):
         return "CountingClassifier(mean=%s)" % self.mean
@@ -11,6 +11,7 @@ from sklearn.linear_model import LogisticRegression
 from sklearn.model_selection import cross_val_score
 
 import logging
 
 logger = logging.getLogger(__name__)
 
+
@@ -28,12 +29,14 @@ class CrossValidatedClassifier(Classifier):
     acceptable. Other numbers are a linear interpolation of these two extremes.
     """
 
-    def __init__(self,
-                 classifier=LogisticRegression(),
-                 threshold=0.75,
-                 constant=0.0,
-                 cv=5,
-                 scoring='accuracy'):
+    def __init__(
+        self,
+        classifier=LogisticRegression(),
+        threshold=0.75,
+        constant=0.0,
+        cv=5,
+        scoring="accuracy",
+    ):
         self.classifier = None
         self.classifier_prototype = classifier
         self.constant = constant
@@ -45,24 +48,36 @@ class CrossValidatedClassifier(Classifier):
         # Calculate dummy score and absolute score threshold
         y_train_avg = np.average(y_train)
         dummy_score = max(y_train_avg, 1 - y_train_avg)
-        absolute_threshold = 1. * self.threshold + dummy_score * (1 - self.threshold)
+        absolute_threshold = 1.0 * self.threshold + dummy_score * (1 - self.threshold)
 
         # Calculate cross validation score and decide which classifier to use
         clf = deepcopy(self.classifier_prototype)
-        cv_score = float(np.mean(cross_val_score(clf,
-                                                 x_train,
-                                                 y_train,
-                                                 cv=self.cv,
-                                                 scoring=self.scoring)))
+        cv_score = float(
+            np.mean(
+                cross_val_score(
+                    clf,
+                    x_train,
+                    y_train,
+                    cv=self.cv,
+                    scoring=self.scoring,
+                )
+            )
+        )
         if cv_score >= absolute_threshold:
-            logger.debug("cv_score is above threshold (%.2f >= %.2f); keeping" %
-                         (cv_score, absolute_threshold))
+            logger.debug(
+                "cv_score is above threshold (%.2f >= %.2f); keeping"
+                % (cv_score, absolute_threshold)
+            )
             self.classifier = clf
         else:
-            logger.debug("cv_score is below threshold (%.2f < %.2f); discarding" %
-                         (cv_score, absolute_threshold))
-            self.classifier = DummyClassifier(strategy="constant",
-                                              constant=self.constant)
+            logger.debug(
+                "cv_score is below threshold (%.2f < %.2f); discarding"
+                % (cv_score, absolute_threshold)
+            )
+            self.classifier = DummyClassifier(
+                strategy="constant",
+                constant=self.constant,
+            )
 
         # Train chosen classifier
         self.classifier.fit(x_train, y_train)
@@ -12,7 +12,6 @@ E = 0.1
 def test_counting():
     clf = CountingClassifier()
     clf.fit(np.zeros((8, 25)), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
-    expected_proba = np.array([[0.375, 0.625],
-                               [0.375, 0.625]])
+    expected_proba = np.array([[0.375, 0.625], [0.375, 0.625]])
     actual_proba = clf.predict_proba(np.zeros((2, 25)))
     assert norm(actual_proba - expected_proba) < E
@@ -13,34 +13,36 @@ E = 0.1
 
 def test_cv():
     # Training set: label is true if point is inside a 2D circle
-    x_train = np.array([[x1, x2]
-                        for x1 in range(-10, 11)
-                        for x2 in range(-10, 11)])
+    x_train = np.array([[x1, x2] for x1 in range(-10, 11) for x2 in range(-10, 11)])
     x_train = StandardScaler().fit_transform(x_train)
     n_samples = x_train.shape[0]
 
-    y_train = np.array([1.0 if x1*x1 + x2*x2 <= 100 else 0.0
-                        for x1 in range(-10, 11)
-                        for x2 in range(-10, 11)])
+    y_train = np.array(
+        [
+            1.0 if x1 * x1 + x2 * x2 <= 100 else 0.0
+            for x1 in range(-10, 11)
+            for x2 in range(-10, 11)
+        ]
+    )
 
     # Support vector machines with linear kernels do not perform well on this
     # data set, so predictor should return the given constant.
-    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
-                                                  random_state=42),
-                                   threshold=0.90,
-                                   constant=0.0,
-                                   cv=30)
+    clf = CrossValidatedClassifier(
+        classifier=SVC(probability=True, random_state=42),
+        threshold=0.90,
+        constant=0.0,
+        cv=30,
+    )
     clf.fit(x_train, y_train)
     assert norm(np.zeros(n_samples) - clf.predict(x_train)) < E
 
     # Support vector machines with quadratic kernels perform almost perfectly
     # on this data set, so predictor should return their prediction.
-    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
-                                                  kernel='poly',
-                                                  degree=2,
-                                                  random_state=42),
-                                   threshold=0.90,
-                                   cv=30)
+    clf = CrossValidatedClassifier(
+        classifier=SVC(probability=True, kernel="poly", degree=2, random_state=42),
+        threshold=0.90,
+        cv=30,
+    )
     clf.fit(x_train, y_train)
     print(y_train - clf.predict(x_train))
     assert norm(y_train - clf.predict(x_train)) < E
@@ -17,4 +17,3 @@ def test_evaluator():
     ev = ClassifierEvaluator()
     assert ev.evaluate(clf_a, x_train, y_train) == 1.0
     assert ev.evaluate(clf_b, x_train, y_train) == 0.5
-
@@ -11,12 +11,16 @@ from miplearn.classifiers.threshold import MinPrecisionThreshold
 
 def test_threshold_dynamic():
     clf = Mock(spec=Classifier)
-    clf.predict_proba = Mock(return_value=np.array([
-        [0.10, 0.90],
-        [0.10, 0.90],
-        [0.20, 0.80],
-        [0.30, 0.70],
-    ]))
+    clf.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [0.10, 0.90],
+                [0.10, 0.90],
+                [0.20, 0.80],
+                [0.30, 0.70],
+            ]
+        )
+    )
     x_train = np.array([0, 1, 2, 3])
     y_train = np.array([1, 1, 0, 0])
 
@@ -31,4 +35,3 @@ def test_threshold_dynamic():
 
     threshold = MinPrecisionThreshold(min_precision=0.00)
     assert threshold.find(clf, x_train, y_train) == 0.70
-
@@ -30,11 +30,15 @@ class MinPrecisionThreshold(DynamicThreshold):
     def find(self, clf, x_train, y_train):
         proba = clf.predict_proba(x_train)
 
-        assert isinstance(proba, np.ndarray), \
-            "classifier should return numpy array"
-        assert proba.shape == (x_train.shape[0], 2), \
-            "classifier should return (%d,%d)-shaped array, not %s" % (
-                x_train.shape[0], 2, str(proba.shape))
+        assert isinstance(proba, np.ndarray), "classifier should return numpy array"
+        assert proba.shape == (
+            x_train.shape[0],
+            2,
+        ), "classifier should return (%d,%d)-shaped array, not %s" % (
+            x_train.shape[0],
+            2,
+            str(proba.shape),
+        )
 
         fps, tps, thresholds = _binary_clf_curve(y_train, proba[:, 1])
         precision = tps / (tps + fps)
@@ -9,15 +9,15 @@ class Component(ABC):
     """
     A Component is an object which adds functionality to a LearningSolver.
     """
 
     @abstractmethod
     def before_solve(self, solver, instance, model):
         pass
 
     @abstractmethod
     def after_solve(self, solver, instance, model, results):
         pass
 
     @abstractmethod
     def fit(self, training_instances):
         pass
@@ -18,10 +18,12 @@ class UserCutsComponent(Component):
     """
     A component that predicts which user cuts to enforce.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+    ):
         self.violations = set()
         self.count = {}
         self.n_samples = 0
@@ -40,7 +42,7 @@ class UserCutsComponent(Component):
 
     def after_solve(self, solver, instance, model, results):
         pass
 
     def fit(self, training_instances):
         logger.debug("Fitting...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -56,10 +58,11 @@ class UserCutsComponent(Component):
                     violation_to_instance_idx[v] = []
                 violation_to_instance_idx[v] += [idx]
 
-        for (v, classifier) in tqdm(self.classifiers.items(),
-                                    desc="Fit (user cuts)",
-                                    disable=not sys.stdout.isatty(),
-                                   ):
+        for (v, classifier) in tqdm(
+            self.classifiers.items(),
+            desc="Fit (user cuts)",
+            disable=not sys.stdout.isatty(),
+        ):
             logger.debug("Training: %s" % (str(v)))
             label = np.zeros(len(training_instances))
             label[violation_to_instance_idx[v]] = 1.0
@@ -79,10 +82,11 @@ class UserCutsComponent(Component):
         all_violations = set()
         for instance in instances:
             all_violations |= set(instance.found_violated_user_cuts)
-        for idx in tqdm(range(len(instances)),
-                        desc="Evaluate (lazy)",
-                        disable=not sys.stdout.isatty(),
-                       ):
+        for idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             instance = instances[idx]
             condition_positive = set(instance.found_violated_user_cuts)
             condition_negative = all_violations - condition_positive
@@ -18,10 +18,12 @@ class DynamicLazyConstraintsComponent(Component):
     """
    A component that predicts which lazy constraints to enforce.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+    ):
         self.violations = set()
         self.count = {}
         self.n_samples = 0
@@ -52,7 +54,7 @@ class DynamicLazyConstraintsComponent(Component):
 
     def after_solve(self, solver, instance, model, results):
         pass
 
     def fit(self, training_instances):
         logger.debug("Fitting...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -68,10 +70,11 @@ class DynamicLazyConstraintsComponent(Component):
                     violation_to_instance_idx[v] = []
                 violation_to_instance_idx[v] += [idx]
 
-        for (v, classifier) in tqdm(self.classifiers.items(),
-                                    desc="Fit (lazy)",
-                                    disable=not sys.stdout.isatty(),
-                                   ):
+        for (v, classifier) in tqdm(
+            self.classifiers.items(),
+            desc="Fit (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             logger.debug("Training: %s" % (str(v)))
             label = np.zeros(len(training_instances))
             label[violation_to_instance_idx[v]] = 1.0
@@ -91,10 +94,11 @@ class DynamicLazyConstraintsComponent(Component):
         all_violations = set()
         for instance in instances:
             all_violations |= set(instance.found_violated_lazy_constraints)
-        for idx in tqdm(range(len(instances)),
-                        desc="Evaluate (lazy)",
-                        disable=not sys.stdout.isatty(),
-                       ):
+        for idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             instance = instances[idx]
             condition_positive = set(instance.found_violated_lazy_constraints)
             condition_negative = all_violations - condition_positive
@@ -19,13 +19,14 @@ class LazyConstraint:
 
 
 class StaticLazyConstraintsComponent(Component):
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05,
-                 use_two_phase_gap=True,
-                 large_gap=1e-2,
-                 violation_tolerance=-0.5,
-                ):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+        use_two_phase_gap=True,
+        large_gap=1e-2,
+        violation_tolerance=-0.5,
+    ):
         self.threshold = threshold
         self.classifier_prototype = classifier
         self.classifiers = {}
@@ -74,32 +75,38 @@ class StaticLazyConstraintsComponent(Component):
         logger.debug("Finding violated lazy constraints...")
         constraints_to_add = []
         for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(c.obj,
-                                                                  tol=self.violation_tolerance):
+            if not solver.internal_solver.is_constraint_satisfied(
+                c.obj, tol=self.violation_tolerance
+            ):
                 constraints_to_add.append(c)
         for c in constraints_to_add:
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
             instance.found_violated_lazy_constraints += [c.cid]
         if len(constraints_to_add) > 0:
-            logger.info("%8d lazy constraints added %8d in the pool" % (len(constraints_to_add), len(self.pool)))
+            logger.info(
+                "%8d lazy constraints added %8d in the pool"
+                % (len(constraints_to_add), len(self.pool))
+            )
             return True
         else:
             return False
 
     def fit(self, training_instances):
-        training_instances = [t
-                              for t in training_instances
-                              if hasattr(t, "found_violated_lazy_constraints")]
+        training_instances = [
+            t
+            for t in training_instances
+            if hasattr(t, "found_violated_lazy_constraints")
+        ]
 
         logger.debug("Extracting x and y...")
         x = self.x(training_instances)
         y = self.y(training_instances)
 
         logger.debug("Fitting...")
-        for category in tqdm(x.keys(),
-                             desc="Fit (lazy)",
-                             disable=not sys.stdout.isatty()):
+        for category in tqdm(
+            x.keys(), desc="Fit (lazy)", disable=not sys.stdout.isatty()
+        ):
             if category not in self.classifiers:
                 self.classifiers[category] = deepcopy(self.classifier_prototype)
             self.classifiers[category].fit(x[category], y[category])
@@ -121,8 +128,10 @@ class StaticLazyConstraintsComponent(Component):
                     x[category] = []
                     constraints[category] = []
                 x[category] += [instance.get_constraint_features(cid)]
-                c = LazyConstraint(cid=cid,
-                                   obj=solver.internal_solver.extract_constraint(cid))
+                c = LazyConstraint(
+                    cid=cid,
+                    obj=solver.internal_solver.extract_constraint(cid),
+                )
                 constraints[category] += [c]
                 self.pool.append(c)
         logger.info("%8d lazy constraints extracted" % len(self.pool))
@@ -141,7 +150,13 @@ class StaticLazyConstraintsComponent(Component):
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
             instance.found_violated_lazy_constraints += [c.cid]
-        logger.info("%8d lazy constraints added %8d in the pool" % (n_added, len(self.pool)))
+        logger.info(
+            "%8d lazy constraints added %8d in the pool"
+            % (
+                n_added,
+                len(self.pool),
+            )
+        )
 
     def _collect_constraints(self, train_instances):
         constraints = {}
@@ -1,13 +1,20 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from sklearn.metrics import mean_squared_error, explained_variance_score, max_error, mean_absolute_error, r2_score
+from sklearn.metrics import (
+    mean_squared_error,
+    explained_variance_score,
+    max_error,
+    mean_absolute_error,
+    r2_score,
+)
 
 from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
 from sklearn.linear_model import LinearRegression
 from copy import deepcopy
 import numpy as np
 import logging
 
 logger = logging.getLogger(__name__)
 
+
@@ -15,12 +22,12 @@ class ObjectiveValueComponent(Component):
     """
     A Component which predicts the optimal objective value of the problem.
     """
-    def __init__(self,
-                 regressor=LinearRegression()):
+
+    def __init__(self, regressor=LinearRegression()):
         self.ub_regressor = None
         self.lb_regressor = None
         self.regressor_prototype = regressor
 
     def before_solve(self, solver, instance, model):
         if self.ub_regressor is not None:
             logger.info("Predicting optimal value...")
@@ -28,7 +35,7 @@ class ObjectiveValueComponent(Component):
             instance.predicted_ub = ub
             instance.predicted_lb = lb
             logger.info("Predicted values: lb=%.2f, ub=%.2f" % (lb, ub))
 
     def after_solve(self, solver, instance, model, results):
         if self.ub_regressor is not None:
             results["Predicted UB"] = instance.predicted_ub
@@ -36,7 +43,7 @@ class ObjectiveValueComponent(Component):
         else:
             results["Predicted UB"] = None
             results["Predicted LB"] = None
 
     def fit(self, training_instances):
         logger.debug("Extracting features...")
         features = InstanceFeaturesExtractor().extract(training_instances)
@@ -50,7 +57,7 @@ class ObjectiveValueComponent(Component):
         self.ub_regressor.fit(features, ub.ravel())
         logger.debug("Fitting ub_regressor...")
         self.lb_regressor.fit(features, lb.ravel())
 
     def predict(self, instances):
         features = InstanceFeaturesExtractor().extract(instances)
         lb = self.lb_regressor.predict(features)
@@ -19,10 +19,12 @@ class PrimalSolutionComponent(Component):
     A component that predicts primal solutions.
     """
 
-    def __init__(self,
-                 classifier=AdaptiveClassifier(),
-                 mode="exact",
-                 threshold=MinPrecisionThreshold(0.98)):
+    def __init__(
+        self,
+        classifier=AdaptiveClassifier(),
+        mode="exact",
+        threshold=MinPrecisionThreshold(0.98),
+    ):
         self.mode = mode
         self.classifiers = {}
         self.thresholds = {}
@@ -51,9 +53,10 @@ class PrimalSolutionComponent(Component):
         features = VariableFeaturesExtractor().extract(training_instances)
         solutions = SolutionExtractor().extract(training_instances)
 
-        for category in tqdm(features.keys(),
-                             desc="Fit (primal)",
-                            ):
+        for category in tqdm(
+            features.keys(),
+            desc="Fit (primal)",
+        ):
             x_train = features[category]
             for label in [0, 1]:
                 y_train = solutions[category][:, label].astype(int)
@@ -74,9 +77,15 @@ class PrimalSolutionComponent(Component):
 
                 # Find threshold (dynamic or static)
                 if isinstance(self.threshold_prototype, DynamicThreshold):
-                    self.thresholds[category, label] = self.threshold_prototype.find(clf, x_train, y_train)
+                    self.thresholds[category, label] = self.threshold_prototype.find(
+                        clf,
+                        x_train,
+                        y_train,
+                    )
                 else:
-                    self.thresholds[category, label] = deepcopy(self.threshold_prototype)
+                    self.thresholds[category, label] = deepcopy(
+                        self.threshold_prototype
+                    )
 
                 self.classifiers[category, label] = clf
 
@@ -98,18 +107,21 @@ class PrimalSolutionComponent(Component):
             ws = np.array([[1 - clf, clf] for _ in range(n)])
         else:
             ws = clf.predict_proba(x_test[category])
-        assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (n, ws.shape)
+        assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (
+            n,
+            ws.shape,
+        )
         for (i, (var, index)) in enumerate(var_split[category]):
             if ws[i, 1] >= self.thresholds[category, label]:
                 solution[var][index] = label
         return solution
 
     def evaluate(self, instances):
-        ev = {"Fix zero": {},
-              "Fix one": {}}
-        for instance_idx in tqdm(range(len(instances)),
-                                 desc="Evaluate (primal)",
-                                ):
+        ev = {"Fix zero": {}, "Fix one": {}}
+        for instance_idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (primal)",
+        ):
             instance = instances[instance_idx]
             solution_actual = instance.solution
             solution_pred = self.predict(instance)
@@ -143,6 +155,10 @@ class PrimalSolutionComponent(Component):
             tn_one = len(pred_one_negative & vars_zero)
             fn_one = len(pred_one_negative & vars_one)
 
-            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(tp_zero, tn_zero, fp_zero, fn_zero)
-            ev["Fix one"][instance_idx] = classifier_evaluation_dict(tp_one, tn_one, fp_one, fn_one)
+            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(
+                tp_zero, tn_zero, fp_zero, fn_zero
+            )
+            ev["Fix one"][instance_idx] = classifier_evaluation_dict(
+                tp_one, tn_one, fp_one, fn_one
+            )
         return ev
@@ -51,14 +51,15 @@ class RelaxationComponent(Component):
     If `check_dropped` is true, set the maximum number of iterations in the lazy constraint loop.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.95,
-                 slack_tolerance=1e-5,
-                 check_dropped=False,
-                 violation_tolerance=1e-5,
-                 max_iterations=3,
-                ):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.95,
+        slack_tolerance=1e-5,
+        check_dropped=False,
+        violation_tolerance=1e-5,
+        max_iterations=3,
+    ):
         self.classifiers = {}
         self.classifier_prototype = classifier
         self.threshold = threshold
@@ -77,16 +78,20 @@ class RelaxationComponent(Component):
 
         logger.info("Predicting redundant LP constraints...")
         cids = solver.internal_solver.get_constraint_ids()
-        x, constraints = self.x([instance],
-                                constraint_ids=cids,
-                                return_constraints=True)
+        x, constraints = self.x(
+            [instance],
+            constraint_ids=cids,
+            return_constraints=True,
+        )
         y = self.predict(x)
         for category in y.keys():
             for i in range(len(y[category])):
                 if y[category][i][0] == 1:
                     cid = constraints[category][i]
-                    c = LazyConstraint(cid=cid,
-                                       obj=solver.internal_solver.extract_constraint(cid))
+                    c = LazyConstraint(
+                        cid=cid,
+                        obj=solver.internal_solver.extract_constraint(cid),
+                    )
                     self.pool += [c]
         logger.info("Extracted %d predicted constraints" % len(self.pool))
 
@@ -98,21 +103,19 @@ class RelaxationComponent(Component):
         x = self.x(training_instances)
         y = self.y(training_instances)
         logger.debug("Fitting...")
-        for category in tqdm(x.keys(),
-                             desc="Fit (relaxation)"):
+        for category in tqdm(x.keys(), desc="Fit (relaxation)"):
             if category not in self.classifiers:
                 self.classifiers[category] = deepcopy(self.classifier_prototype)
             self.classifiers[category].fit(x[category], y[category])
 
-    def x(self,
-          instances,
-          constraint_ids=None,
-          return_constraints=False):
+    def x(self, instances, constraint_ids=None, return_constraints=False):
         x = {}
         constraints = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (relaxation:x)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (relaxation:x)",
+            disable=len(instances) < 5,
+        ):
             if constraint_ids is not None:
                 cids = constraint_ids
             else:
@@ -133,9 +136,11 @@ class RelaxationComponent(Component):
 
     def y(self, instances):
         y = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (relaxation:y)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (relaxation:y)",
+            disable=len(instances) < 5,
+        ):
             for (cid, slack) in instance.slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
@@ -154,7 +159,7 @@ class RelaxationComponent(Component):
             if category not in self.classifiers:
                 continue
             y[category] = []
-            #x_cat = np.array(x_cat)
+            # x_cat = np.array(x_cat)
             proba = self.classifiers[category].predict_proba(x_cat)
             for i in range(len(proba)):
                 if proba[i][1] >= self.threshold:
@@ -191,13 +196,19 @@ class RelaxationComponent(Component):
         logger.debug("Checking that dropped constraints are satisfied...")
         constraints_to_add = []
         for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(c.obj, self.violation_tolerance):
+            if not solver.internal_solver.is_constraint_satisfied(
+                c.obj,
+                self.violation_tolerance,
+            ):
                 constraints_to_add.append(c)
         for c in constraints_to_add:
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
         if len(constraints_to_add) > 0:
-            logger.info("%8d constraints %8d in the pool" % (len(constraints_to_add), len(self.pool)))
+            logger.info(
+                "%8d constraints %8d in the pool"
+                % (len(constraints_to_add), len(self.pool))
+            )
             return True
         else:
             return False
@@ -28,9 +28,9 @@ def test_lazy_fit():
     assert "c" in component.classifiers
 
     # Should provide correct x_train to each classifier
-    expected_x_train_a = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
-    expected_x_train_b = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
-    expected_x_train_c = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
+    expected_x_train_a = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
+    expected_x_train_b = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
+    expected_x_train_c = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
     actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
     actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
     actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
@@ -56,16 +56,15 @@ def test_lazy_before():
     solver = LearningSolver()
     solver.internal_solver = Mock(spec=InternalSolver)
     component = DynamicLazyConstraintsComponent(threshold=0.10)
-    component.classifiers = {"a": Mock(spec=Classifier),
-                             "b": Mock(spec=Classifier)}
+    component.classifiers = {"a": Mock(spec=Classifier), "b": Mock(spec=Classifier)}
     component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
     component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])
 
     component.before_solve(solver, instances[0], models[0])
 
     # Should ask classifier likelihood of each constraint being violated
-    expected_x_test_a = np.array([[67., 21.75, 1287.92]])
-    expected_x_test_b = np.array([[67., 21.75, 1287.92]])
+    expected_x_test_a = np.array([[67.0, 21.75, 1287.92]])
+    expected_x_test_b = np.array([[67.0, 21.75, 1287.92]])
     actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
     actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
     assert norm(expected_x_test_a - actual_x_test_a) < E
@@ -82,13 +81,15 @@ def test_lazy_before():
 def test_lazy_evaluate():
     instances, models = get_test_pyomo_instances()
     component = DynamicLazyConstraintsComponent()
-    component.classifiers = {"a": Mock(spec=Classifier),
-                             "b": Mock(spec=Classifier),
-                             "c": Mock(spec=Classifier)}
+    component.classifiers = {
+        "a": Mock(spec=Classifier),
+        "b": Mock(spec=Classifier),
+        "c": Mock(spec=Classifier),
+    }
     component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
     component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
     component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])
 
     instances[0].found_violated_lazy_constraints = ["a", "b", "c"]
     instances[1].found_violated_lazy_constraints = ["b", "d"]
     assert component.evaluate(instances) == {
@@ -96,7 +97,7 @@ def test_lazy_evaluate():
             "Accuracy": 0.75,
             "F1 score": 0.8,
             "Precision": 1.0,
-            "Recall": 2/3.,
+            "Recall": 2 / 3.0,
             "Predicted positive": 2,
             "Predicted negative": 2,
             "Condition positive": 3,
@@ -135,6 +136,5 @@ def test_lazy_evaluate():
             "False positive (%)": 25.0,
             "True negative (%)": 25.0,
             "True positive (%)": 25.0,
-        }
+        },
     }
-
@@ -4,10 +4,12 @@
 
 from unittest.mock import Mock, call
 
-from miplearn import (StaticLazyConstraintsComponent,
-                      LearningSolver,
-                      Instance,
-                      InternalSolver)
+from miplearn import (
+    StaticLazyConstraintsComponent,
+    LearningSolver,
+    Instance,
+    InternalSolver,
+)
 from miplearn.classifiers import Classifier
 
 
@@ -23,39 +25,47 @@ def test_usage_with_solver():
 
     instance = Mock(spec=Instance)
     instance.has_static_lazy_constraints = Mock(return_value=True)
-    instance.is_constraint_lazy = Mock(side_effect=lambda cid: {
-        "c1": False,
-        "c2": True,
-        "c3": True,
-        "c4": True,
-    }[cid])
-    instance.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c2": [1.0, 0.0],
-        "c3": [0.5, 0.5],
-        "c4": [1.0],
-    }[cid])
-    instance.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-    }[cid])
+    instance.is_constraint_lazy = Mock(
+        side_effect=lambda cid: {
+            "c1": False,
+            "c2": True,
+            "c3": True,
+            "c4": True,
+        }[cid]
+    )
+    instance.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": [1.0, 0.0],
+            "c3": [0.5, 0.5],
+            "c4": [1.0],
+        }[cid]
+    )
+    instance.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
 
-    component = StaticLazyConstraintsComponent(threshold=0.90,
-                                               use_two_phase_gap=False,
-                                               violation_tolerance=1.0)
+    component = StaticLazyConstraintsComponent(
+        threshold=0.90, use_two_phase_gap=False, violation_tolerance=1.0
+    )
     component.classifiers = {
         "type-a": Mock(spec=Classifier),
         "type-b": Mock(spec=Classifier),
     }
-    component.classifiers["type-a"].predict_proba = \
-        Mock(return_value=[
-            [0.20, 0.80],
-            [0.05, 0.95],
-        ])
-    component.classifiers["type-b"].predict_proba = \
-        Mock(return_value=[
-            [0.02, 0.98],
-        ])
+    component.classifiers["type-a"].predict_proba = Mock(
+        return_value=[
+            [0.20, 0.80],
+            [0.05, 0.95],
+        ]
+    )
+    component.classifiers["type-b"].predict_proba = Mock(
+        return_value=[
+            [0.02, 0.98],
+        ]
+    )
 
     # LearningSolver calls before_solve
     component.before_solve(solver, instance, None)
@@ -67,37 +77,59 @@ def test_usage_with_solver():
|
|||||||
internal.get_constraint_ids.assert_called_once()
|
internal.get_constraint_ids.assert_called_once()
|
||||||
|
|
||||||
# Should ask if each constraint in the model is lazy
|
# Should ask if each constraint in the model is lazy
|
||||||
instance.is_constraint_lazy.assert_has_calls([
|
instance.is_constraint_lazy.assert_has_calls(
|
||||||
call("c1"), call("c2"), call("c3"), call("c4"),
|
[
|
||||||
])
|
call("c1"),
|
||||||
|
call("c2"),
|
||||||
|
call("c3"),
|
||||||
|
call("c4"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
# For the lazy ones, should ask for features
|
# For the lazy ones, should ask for features
|
||||||
instance.get_constraint_features.assert_has_calls([
|
instance.get_constraint_features.assert_has_calls(
|
||||||
call("c2"), call("c3"), call("c4"),
|
[
|
||||||
])
|
call("c2"),
|
||||||
|
call("c3"),
|
||||||
|
call("c4"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
# Should also ask for categories
|
# Should also ask for categories
|
||||||
assert instance.get_constraint_category.call_count == 3
|
assert instance.get_constraint_category.call_count == 3
|
||||||
instance.get_constraint_category.assert_has_calls([
|
instance.get_constraint_category.assert_has_calls(
|
||||||
call("c2"), call("c3"), call("c4"),
|
[
|
||||||
])
|
call("c2"),
|
||||||
|
call("c3"),
|
||||||
|
call("c4"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
# Should ask internal solver to remove constraints identified as lazy
|
# Should ask internal solver to remove constraints identified as lazy
|
||||||
assert internal.extract_constraint.call_count == 3
|
assert internal.extract_constraint.call_count == 3
|
||||||
internal.extract_constraint.assert_has_calls([
|
internal.extract_constraint.assert_has_calls(
|
||||||
call("c2"), call("c3"), call("c4"),
|
[
|
||||||
])
|
call("c2"),
|
||||||
|
call("c3"),
|
||||||
|
call("c4"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
# Should ask ML to predict whether each lazy constraint should be enforced
|
# Should ask ML to predict whether each lazy constraint should be enforced
|
||||||
component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
|
component.classifiers["type-a"].predict_proba.assert_called_once_with(
|
||||||
|
[[1.0, 0.0], [0.5, 0.5]]
|
||||||
|
)
|
||||||
component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
|
component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
|
||||||
|
|
||||||
# For the ones that should be enforced, should ask solver to re-add them
|
# For the ones that should be enforced, should ask solver to re-add them
|
||||||
# to the formulation. The remaining ones should remain in the pool.
|
# to the formulation. The remaining ones should remain in the pool.
|
||||||
assert internal.add_constraint.call_count == 2
|
assert internal.add_constraint.call_count == 2
|
||||||
internal.add_constraint.assert_has_calls([
|
internal.add_constraint.assert_has_calls(
|
||||||
call("<c3>"), call("<c4>"),
|
[
|
||||||
])
|
call("<c3>"),
|
||||||
|
call("<c4>"),
|
||||||
|
]
|
||||||
|
)
|
||||||
internal.add_constraint.reset_mock()
|
internal.add_constraint.reset_mock()
|
||||||
|
|
||||||
# LearningSolver calls after_iteration (first time)
|
# LearningSolver calls after_iteration (first time)
|
||||||
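# Aside (illustrative, not part of the diff): a minimal sketch of why exactly
# <c3> and <c4> are re-added above. With threshold=0.90, a lazy constraint is
# enforced when its predicted probability of being violated reaches the
# threshold; the exact comparison operator is an assumption here.
proba = {"c2": 0.80, "c3": 0.95, "c4": 0.98}
enforced = [cid for cid, p in proba.items() if p >= 0.90]
assert enforced == ["c3", "c4"]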
@@ -126,37 +158,45 @@ def test_usage_with_solver():
def test_fit():
    instance_1 = Mock(spec=Instance)
    instance_1.found_violated_lazy_constraints = ["c1", "c2", "c4", "c5"]
    instance_1.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": "type-a",
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instance_1.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c1": [1, 1],
            "c2": [1, 2],
            "c3": [1, 3],
            "c4": [1, 4, 0],
            "c5": [1, 5, 0],
        }[cid]
    )

    instance_2 = Mock(spec=Instance)
    instance_2.found_violated_lazy_constraints = ["c2", "c3", "c4"]
    instance_2.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": "type-a",
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instance_2.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c1": [2, 1],
            "c2": [2, 2],
            "c3": [2, 3],
            "c4": [2, 4, 0],
            "c5": [2, 5, 0],
        }[cid]
    )

    instances = [instance_1, instance_2]
    component = StaticLazyConstraintsComponent()

@@ -171,18 +211,22 @@ def test_fit():
    }
    expected_x = {
        "type-a": [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]],
        "type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]],
    }
    expected_y = {
        "type-a": [[0, 1], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
        "type-b": [[0, 1], [0, 1], [0, 1], [1, 0]],
    }
    assert component._collect_constraints(instances) == expected_constraints
    assert component.x(instances) == expected_x
    assert component.y(instances) == expected_y

    component.fit(instances)
    component.classifiers["type-a"].fit.assert_called_once_with(
        expected_x["type-a"],
        expected_y["type-a"],
    )
    component.classifiers["type-b"].fit.assert_called_once_with(
        expected_x["type-b"],
        expected_y["type-b"],
    )
@@ -16,8 +16,10 @@ def test_usage():
    comp.fit(instances)
    assert instances[0].lower_bound == 1183.0
    assert instances[0].upper_bound == 1183.0
    assert np.round(comp.predict(instances), 2).tolist() == [
        [1183.0, 1183.0],
        [1070.0, 1070.0],
    ]


def test_obj_evaluate():

@@ -28,20 +30,20 @@ def test_obj_evaluate():
    comp.fit(instances)
    ev = comp.evaluate(instances)
    assert ev == {
        "Lower bound": {
            "Explained variance": 0.0,
            "Max error": 183.0,
            "Mean absolute error": 126.5,
            "Mean squared error": 19194.5,
            "Median absolute error": 126.5,
            "R2": -5.012843605607331,
        },
        "Upper bound": {
            "Explained variance": 0.0,
            "Max error": 183.0,
            "Mean absolute error": 126.5,
            "Mean squared error": 19194.5,
            "Median absolute error": 126.5,
            "R2": -5.012843605607331,
        },
    }
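# Aside (illustrative, not part of the diff): the error metrics asserted above
# are mutually consistent if the two predictions miss by 70 and 183.
errors = [70.0, 183.0]
assert max(errors) == 183.0
assert sum(errors) / len(errors) == 126.5
assert sum(e * e for e in errors) / len(errors) == 19194.5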
@@ -25,71 +25,82 @@ def test_predict():
def test_evaluate():
    instances, models = get_test_pyomo_instances()
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(
        return_value=np.array(
            [
                [0.0, 1.0],  # x[0]
                [0.0, 1.0],  # x[1]
                [1.0, 0.0],  # x[2]
                [1.0, 0.0],  # x[3]
            ]
        )
    )
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(
        return_value=np.array(
            [
                [1.0, 0.0],  # x[0] instances[0]
                [1.0, 0.0],  # x[1] instances[0]
                [0.0, 1.0],  # x[2] instances[0]
                [1.0, 0.0],  # x[3] instances[0]
            ]
        )
    )
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one], threshold=0.50)
    comp.fit(instances[:1])
    assert comp.predict(instances[0]) == {"x": {0: 0, 1: 0, 2: 1, 3: None}}
    assert instances[0].solution == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
    ev = comp.evaluate(instances[:1])
    assert ev == {
        "Fix one": {
            0: {
                "Accuracy": 0.5,
                "Condition negative": 1,
                "Condition negative (%)": 25.0,
                "Condition positive": 3,
                "Condition positive (%)": 75.0,
                "F1 score": 0.5,
                "False negative": 2,
                "False negative (%)": 50.0,
                "False positive": 0,
                "False positive (%)": 0.0,
                "Precision": 1.0,
                "Predicted negative": 3,
                "Predicted negative (%)": 75.0,
                "Predicted positive": 1,
                "Predicted positive (%)": 25.0,
                "Recall": 0.3333333333333333,
                "True negative": 1,
                "True negative (%)": 25.0,
                "True positive": 1,
                "True positive (%)": 25.0,
            }
        },
        "Fix zero": {
            0: {
                "Accuracy": 0.75,
                "Condition negative": 3,
                "Condition negative (%)": 75.0,
                "Condition positive": 1,
                "Condition positive (%)": 25.0,
                "F1 score": 0.6666666666666666,
                "False negative": 0,
                "False negative (%)": 0.0,
                "False positive": 1,
                "False positive (%)": 25.0,
                "Precision": 0.5,
                "Predicted negative": 2,
                "Predicted negative (%)": 50.0,
                "Predicted positive": 2,
                "Predicted positive (%)": 50.0,
                "Recall": 1.0,
                "True negative": 2,
                "True negative (%)": 50.0,
                "True positive": 1,
                "True positive (%)": 25.0,
            }
        },
    }


def test_primal_parallel_fit():
@@ -4,10 +4,7 @@

from unittest.mock import Mock, call

from miplearn import RelaxationComponent, LearningSolver, Instance, InternalSolver
from miplearn.classifiers import Classifier


@@ -16,41 +13,49 @@ def _setup():

    internal = solver.internal_solver = Mock(spec=InternalSolver)
    internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
    internal.get_constraint_slacks = Mock(
        side_effect=lambda: {
            "c1": 0.5,
            "c2": 0.0,
            "c3": 0.0,
            "c4": 1.4,
        }
    )
    internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
    internal.is_constraint_satisfied = Mock(return_value=False)

    instance = Mock(spec=Instance)
    instance.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": [1.0, 0.0],
            "c3": [0.5, 0.5],
            "c4": [1.0],
        }[cid]
    )
    instance.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )

    classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    classifiers["type-a"].predict_proba = Mock(
        return_value=[
            [0.20, 0.80],
            [0.05, 0.95],
        ]
    )
    classifiers["type-b"].predict_proba = Mock(
        return_value=[
            [0.02, 0.98],
        ]
    )

    return solver, internal, instance, classifiers


@@ -72,25 +77,39 @@ def test_usage():

    # Should query category and features for each constraint in the model
    assert instance.get_constraint_category.call_count == 4
    instance.get_constraint_category.assert_has_calls(
        [
            call("c1"),
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # For constraint with non-null categories, should ask for features
    assert instance.get_constraint_features.call_count == 3
    instance.get_constraint_features.assert_has_calls(
        [
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # Should ask ML to predict whether constraint should be removed
    component.classifiers["type-a"].predict_proba.assert_called_once_with(
        [[1.0, 0.0], [0.5, 0.5]]
    )
    component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])

    # Should ask internal solver to remove constraints predicted as redundant
    assert internal.extract_constraint.call_count == 2
    internal.extract_constraint.assert_has_calls(
        [
            call("c3"),
            call("c4"),
        ]
    )

    # LearningSolver calls after_solve
    component.after_solve(solver, instance, None, None)

@@ -111,8 +130,7 @@ def test_usage():
def test_usage_with_check_dropped():
    solver, internal, instance, classifiers = _setup()

    component = RelaxationComponent(check_dropped=True, violation_tolerance=1e-3)
    component.classifiers = classifiers

    # LearningSolver call before_solve

@@ -120,9 +138,12 @@ def test_usage_with_check_dropped():

    # Assert constraints are extracted
    assert internal.extract_constraint.call_count == 2
    internal.extract_constraint.assert_has_calls(
        [
            call("c3"),
            call("c4"),
        ]
    )

    # LearningSolver calls iteration_cb (first time)
    should_repeat = component.iteration_cb(solver, instance, None)

@@ -131,15 +152,15 @@ def test_usage_with_check_dropped():
    assert should_repeat

    # Should ask solver if removed constraints are satisfied (mock always returns false)
    internal.is_constraint_satisfied.assert_has_calls(
        [
            call("<c3>", 1e-3),
            call("<c4>", 1e-3),
        ]
    )

    # Should add constraints back to LP relaxation
    internal.add_constraint.assert_has_calls([call("<c3>"), call("<c4>")])

    # LearningSolver calls iteration_cb (second time)
    should_repeat = component.iteration_cb(solver, instance, None)

@@ -148,21 +169,22 @@ def test_usage_with_check_dropped():

def test_x_y_fit_predict_evaluate():
    instances = [Mock(spec=Instance), Mock(spec=Instance)]
    component = RelaxationComponent(slack_tolerance=0.05, threshold=0.80)
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    component.classifiers["type-a"].predict_proba = Mock(
        return_value=[
            [0.20, 0.80],
        ]
    )
    component.classifiers["type-b"].predict_proba = Mock(
        return_value=[
            [0.50, 0.50],
            [0.05, 0.95],
        ]
    )

    # First mock instance
    instances[0].slacks = {

@@ -171,17 +193,21 @@ def test_x_y_fit_predict_evaluate():
        "c3": 0.00,
        "c4": 30.0,
    }
    instances[0].get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )
    instances[0].get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": [1.0, 0.0],
            "c3": [0.5, 0.5],
            "c4": [1.0],
        }[cid]
    )

    # Second mock instance
    instances[1].slacks = {

@@ -190,26 +216,27 @@ def test_x_y_fit_predict_evaluate():
        "c4": 0.00,
        "c5": 0.00,
    }
    instances[1].get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instances[1].get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c3": [0.3, 0.4],
            "c4": [0.7],
            "c5": [0.8],
        }[cid]
    )

    expected_x = {
        "type-a": [[1.0, 0.0], [0.5, 0.5], [0.3, 0.4]],
        "type-b": [[1.0], [0.7], [0.8]],
    }
    expected_y = {"type-a": [[0], [0], [1]], "type-b": [[1], [0], [0]]}

    # Should build X and Y matrices correctly
    assert component.x(instances) == expected_x
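# Aside (illustrative, not part of the diff): a sketch of how the labels above
# follow from the slacks, assuming a constraint is marked droppable ([1])
# exactly when its slack exceeds slack_tolerance=0.05.
label = lambda slack: [int(slack > 0.05)]
assert label(30.0) == [1]  # instances[0], "c4"
assert label(0.00) == [0]  # instances[1], "c4"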
@@ -217,13 +244,16 @@ def test_x_y_fit_predict_evaluate():

    # Should pass along X and Y matrices to classifiers
    component.fit(instances)
    component.classifiers["type-a"].fit.assert_called_with(
        expected_x["type-a"],
        expected_y["type-a"],
    )
    component.classifiers["type-b"].fit.assert_called_with(
        expected_x["type-b"],
        expected_y["type-b"],
    )

    assert component.predict(expected_x) == {"type-a": [[1]], "type-b": [[0], [1]]}

    ev = component.evaluate(instances[1])
    assert ev["True positive"] == 1
@@ -18,10 +18,10 @@ class InstanceIterator:
    def __init__(self, instances):
        self.instances = instances
        self.current = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.current >= len(self.instances):
            raise StopIteration

@@ -40,9 +40,9 @@ class InstanceIterator:

class Extractor(ABC):
    @abstractmethod
    def extract(self, instances):
        pass

    @staticmethod
    def split_variables(instance):
        assert hasattr(instance, "lp_solution")

@@ -57,13 +57,15 @@ class Extractor(ABC):
            result[category] += [(var_name, index)]
        return result


class VariableFeaturesExtractor(Extractor):
    def extract(self, instances):
        result = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (vars)",
            disable=len(instances) < 5,
        ):
            instance_features = instance.get_instance_features()
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():

@@ -71,9 +73,9 @@ class VariableFeaturesExtractor(Extractor):
                    result[category] = []
                for (var_name, index) in var_index_pairs:
                    result[category] += [
                        instance_features.tolist()
                        + instance.get_variable_features(var_name, index).tolist()
                        + [instance.lp_solution[var_name][index]]
                    ]
        for category in result:
            result[category] = np.array(result[category])
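# Aside (illustrative, not part of the diff): each row assembled above
# concatenates, in order, the instance features, the per-variable features,
# and the variable's LP relaxation value. The numbers below are hypothetical.
instance_features = [5.0, 120.0]  # from get_instance_features()
variable_features = [23.0, 61.0]  # from get_variable_features(var, index)
lp_value = [0.5]                  # from lp_solution[var_name][index]
assert instance_features + variable_features + lp_value == [5.0, 120.0, 23.0, 61.0, 0.5]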
@@ -83,12 +85,14 @@ class VariableFeaturesExtractor(Extractor):

class SolutionExtractor(Extractor):
    def __init__(self, relaxation=False):
        self.relaxation = relaxation

    def extract(self, instances):
        result = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (solution)",
            disable=len(instances) < 5,
        ):
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
                if category not in result:

@@ -103,33 +107,40 @@ class SolutionExtractor(Extractor):
                else:
                    result[category] += [[1 - v, v]]
        for category in result:
            result[category] = np.array(result[category])
        return result


class InstanceFeaturesExtractor(Extractor):
    def extract(self, instances):
        return np.vstack(
            [
                np.hstack(
                    [
                        instance.get_instance_features(),
                        instance.lp_value,
                    ]
                )
                for instance in InstanceIterator(instances)
            ]
        )


class ObjectiveValueExtractor(Extractor):
    def __init__(self, kind="lp"):
        assert kind in ["lower bound", "upper bound", "lp"]
        self.kind = kind

    def extract(self, instances):
        if self.kind == "lower bound":
            return np.array(
                [[instance.lower_bound] for instance in InstanceIterator(instances)]
            )
        if self.kind == "upper bound":
            return np.array(
                [[instance.upper_bound] for instance in InstanceIterator(instances)]
            )
        if self.kind == "lp":
            return np.array(
                [[instance.lp_value] for instance in InstanceIterator(instances)]
            )
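# Aside (illustrative, not part of the diff): SolutionExtractor encodes each
# variable's (possibly fractional) value v as the pair [1 - v, v], which reads
# as [P(variable = 0), P(variable = 1)] for binary variables.
v = 0.25  # hypothetical fractional value from an LP relaxation
assert [1 - v, v] == [0.75, 0.25]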
@@ -12,7 +12,7 @@ import numpy as np

class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the problem.

    In the knapsack problem, for example, this class could hold the number of items, their weights
    and costs, as well as the size of the knapsack. Objects implementing this class are able to
    convert themselves into a concrete optimization model, which can be optimized by a solver, or

@@ -29,17 +29,17 @@ class Instance(ABC):
    def get_instance_features(self):
        """
        Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.

        The array is used by LearningSolver to determine how similar two instances are. It may also
        be used to predict, in combination with variable-specific features, the values of binary
        decision variables in the problem.

        There is not necessarily a one-to-one correspondence between models and instance features:
        the features may encode only part of the data necessary to generate the complete model.
        Features may also be statistics computed from the original data. For example, in the
        knapsack problem, an implementation may decide to provide as instance features only
        the average weights, average prices, number of items and the size of the knapsack.

        The returned array MUST have the same length for all relevant instances of the problem. If
        two instances map into arrays of different lengths, they cannot be solved by the same
        LearningSolver object.

@@ -52,16 +52,16 @@ class Instance(ABC):
        """
        Returns a 1-dimensional array of (numerical) features describing a particular decision
        variable.

        The argument `var` is a pyomo.core.Var object, which represents a collection of decision
        variables. The argument `index` specifies which variable in the collection is the relevant
        one.

        In combination with instance features, variable features are used by LearningSolver to
        predict, among other things, the optimal value of each decision variable before the
        optimization takes place. In the knapsack problem, for example, an implementation could
        provide as variable features the weight and the price of a specific item.

        Like instance features, the arrays returned by this method MUST have the same length for
        all variables within the same category, for all relevant instances of the problem.

@@ -73,7 +73,7 @@ class Instance(ABC):
        """
        Returns the category (a string, an integer or any hashable type) for each decision
        variable.

        If two variables have the same category, LearningSolver will use the same internal ML
        model to predict the values of both variables. If the returned category is None, ML
        models will ignore the variable.

@@ -100,18 +100,18 @@ class Instance(ABC):
    def find_violated_lazy_constraints(self, model):
        """
        Returns lazy constraint violations found for the current solution.

        After solving a model, LearningSolver will ask the instance to identify which lazy
        constraints are violated by the current solution. For each identified violation,
        LearningSolver will then call build_lazy_constraint, add the generated Pyomo
        constraint to the model, and re-solve the problem. The process repeats until no further
        lazy constraint violations are found.

        Each "violation" is simply a string, a tuple or any other hashable type which allows the
        instance to identify unambiguously which lazy constraint should be generated. In the
        Traveling Salesman Problem, for example, a subtour violation could be a frozen set
        containing the cities in the subtour.

        For a concrete example, see TravelingSalesmanInstance.
        """
        return []
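# Aside (illustrative, not part of the diff): a sketch of a violation object
# for the TSP example mentioned in the docstring above. City names are
# hypothetical.
subtour = frozenset(["chicago", "st-louis", "memphis"])
violations = [subtour]        # what find_violated_lazy_constraints could return
assert subtour in violations  # hashable, so it unambiguously identifies the constraint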
@@ -119,17 +119,17 @@ class Instance(ABC):
    def build_lazy_constraint(self, model, violation):
        """
        Returns a Pyomo constraint which fixes a given violation.

        This method is typically called immediately after find_violated_lazy_constraints. The
        violation object provided to this method is exactly the same object returned earlier by
        find_violated_lazy_constraints. After some training, LearningSolver may decide to
        proactively build some lazy constraints at the beginning of the optimization process,
        before a solution is even available. In this case, build_lazy_constraint will be called
        without a corresponding call to find_violated_lazy_constraints.

        The implementation should not directly add the constraint to the model. The constraint
        will be added by LearningSolver after the method returns.

        For a concrete example, see TravelingSalesmanInstance.
        """
        pass

@@ -141,11 +141,11 @@ class Instance(ABC):
        pass

    def load(self, filename):
        with gzip.GzipFile(filename, "r") as f:
            data = json.loads(f.read().decode("utf-8"))
        self.__dict__ = data

    def dump(self, filename):
        data = json.dumps(self.__dict__, indent=2).encode("utf-8")
        with gzip.GzipFile(filename, "w") as f:
            f.write(data)
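# Aside (illustrative, not part of the diff): a round-trip sketch for the
# load/dump pair above, using a minimal stand-in object and a temporary path
# (both hypothetical).
import gzip
import json
import os
import tempfile

class _Stub:
    pass

original = _Stub()
original.weights = [1, 2, 3]
path = os.path.join(tempfile.gettempdir(), "instance.json.gz")
with gzip.GzipFile(path, "w") as f:
    f.write(json.dumps(original.__dict__, indent=2).encode("utf-8"))
restored = _Stub()
with gzip.GzipFile(path, "r") as f:
    restored.__dict__ = json.loads(f.read().decode("utf-8"))
assert restored.weights == [1, 2, 3]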
@@ -7,7 +7,8 @@ import logging
import time
import sys


class TimeFormatter:
    def __init__(self, start_time, log_colors):
        self.start_time = start_time
        self.log_colors = log_colors

@@ -19,21 +20,23 @@ class TimeFormatter():
            color = self.log_colors["yellow"]
        else:
            color = self.log_colors["green"]
        return "%s[%12.3f]%s %s" % (
            color,
            record.created - self.start_time,
            self.log_colors["reset"],
            record.getMessage(),
        )


def setup_logger(start_time=None, force_color=False):
    if start_time is None:
        start_time = time.time()
    if sys.stdout.isatty() or force_color:
        log_colors = {
            "green": "\033[92m",
            "yellow": "\033[93m",
            "red": "\033[91m",
            "reset": "\033[0m",
        }
    else:
        log_colors = {

@@ -41,7 +44,7 @@ def setup_logger(
            "yellow": "",
            "red": "",
            "reset": "",
        }
    handler = logging.StreamHandler()
    handler.setFormatter(TimeFormatter(start_time, log_colors))
    logging.getLogger().addHandler(handler)
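# Aside (illustrative, not part of the diff): what TimeFormatter prints, minus
# the color escapes; the elapsed time since start_time appears in a
# fixed-width column before the message.
start, created = 0.0, 0.123
print("%s[%12.3f]%s %s" % ("", created - start, "", "solver started"))
# -> [       0.123] solver started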
@@ -17,44 +17,45 @@ class ChallengeA:
|
|||||||
- K = 500, u ~ U(0., 1.)
|
- K = 500, u ~ U(0., 1.)
|
||||||
- alpha = 0.25
|
- alpha = 0.25
|
||||||
"""
|
"""
|
||||||
def __init__(self,
|
|
||||||
seed=42,
|
def __init__(
|
||||||
n_training_instances=500,
|
self,
|
||||||
n_test_instances=50):
|
seed=42,
|
||||||
|
n_training_instances=500,
|
||||||
|
n_test_instances=50,
|
||||||
|
):
|
||||||
|
|
||||||
np.random.seed(seed)
|
np.random.seed(seed)
|
||||||
self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
|
self.gen = MultiKnapsackGenerator(
|
||||||
m=randint(low=10, high=11),
|
n=randint(low=250, high=251),
|
||||||
w=uniform(loc=0.0, scale=1000.0),
|
m=randint(low=10, high=11),
|
||||||
K=uniform(loc=500.0, scale=0.0),
|
w=uniform(loc=0.0, scale=1000.0),
|
||||||
u=uniform(loc=0.0, scale=1.0),
|
K=uniform(loc=500.0, scale=0.0),
|
||||||
alpha=uniform(loc=0.25, scale=0.0),
|
u=uniform(loc=0.0, scale=1.0),
|
||||||
fix_w=True,
|
alpha=uniform(loc=0.25, scale=0.0),
|
||||||
w_jitter=uniform(loc=0.95, scale=0.1),
|
fix_w=True,
|
||||||
)
|
w_jitter=uniform(loc=0.95, scale=0.1),
|
||||||
|
)
|
||||||
np.random.seed(seed + 1)
|
np.random.seed(seed + 1)
|
||||||
self.training_instances = self.gen.generate(n_training_instances)
|
self.training_instances = self.gen.generate(n_training_instances)
|
||||||
|
|
||||||
np.random.seed(seed + 2)
|
np.random.seed(seed + 2)
|
||||||
self.test_instances = self.gen.generate(n_test_instances)
|
self.test_instances = self.gen.generate(n_test_instances)
|
||||||
|
|
||||||
|
|
||||||
class MultiKnapsackInstance(Instance):
|
class MultiKnapsackInstance(Instance):
|
||||||
"""Representation of the Multidimensional 0-1 Knapsack Problem.
|
"""Representation of the Multidimensional 0-1 Knapsack Problem.
|
||||||
|
|
||||||
Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
|
Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
|
||||||
sum(prices[i] for i in S). If selected, each item i occupies weights[i,j] units of space in
|
sum(prices[i] for i in S). If selected, each item i occupies weights[i,j] units of space in
|
||||||
each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].
|
each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].
|
||||||
|
|
||||||
This implementation assigns a different category for each decision variable, and therefore
|
This implementation assigns a different category for each decision variable, and therefore
|
||||||
trains one ML model per variable. It is only suitable when training and test instances have
|
trains one ML model per variable. It is only suitable when training and test instances have
|
||||||
same size and items don't shuffle around.
|
same size and items don't shuffle around.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self, prices, capacities, weights):
|
||||||
prices,
|
|
||||||
capacities,
|
|
||||||
weights):
|
|
||||||
assert isinstance(prices, np.ndarray)
|
assert isinstance(prices, np.ndarray)
|
||||||
assert isinstance(capacities, np.ndarray)
|
assert isinstance(capacities, np.ndarray)
|
||||||
assert isinstance(weights, np.ndarray)
|
assert isinstance(weights, np.ndarray)
|
||||||
@@ -65,83 +66,92 @@ class MultiKnapsackInstance(Instance):
|
|||||||
self.prices = prices
|
self.prices = prices
|
||||||
self.capacities = capacities
|
self.capacities = capacities
|
||||||
self.weights = weights
|
self.weights = weights
|
||||||
|
|
||||||
def to_model(self):
|
def to_model(self):
|
||||||
model = pe.ConcreteModel()
|
model = pe.ConcreteModel()
|
||||||
model.x = pe.Var(range(self.n), domain=pe.Binary)
|
model.x = pe.Var(range(self.n), domain=pe.Binary)
|
||||||
model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
|
model.OBJ = pe.Objective(
|
||||||
for j in range(self.n)),
|
rule=lambda model: sum(model.x[j] * self.prices[j] for j in range(self.n)),
|
||||||
sense=pe.maximize)
|
sense=pe.maximize,
|
||||||
|
)
|
||||||
model.eq_capacity = pe.ConstraintList()
|
model.eq_capacity = pe.ConstraintList()
|
||||||
for i in range(self.m):
|
for i in range(self.m):
|
||||||
model.eq_capacity.add(sum(model.x[j] * self.weights[i,j]
|
model.eq_capacity.add(
|
||||||
for j in range(self.n)) <= self.capacities[i])
|
sum(model.x[j] * self.weights[i, j] for j in range(self.n))
|
||||||
|
<= self.capacities[i]
|
||||||
|
)
|
||||||
|
|
||||||
return model
|
return model
|
||||||
|
|
||||||
def get_instance_features(self):
|
def get_instance_features(self):
|
||||||
return np.hstack([
|
return np.hstack(
|
||||||
np.mean(self.prices),
|
[
|
||||||
self.capacities,
|
np.mean(self.prices),
|
||||||
])
|
self.capacities,
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
def get_variable_features(self, var, index):
|
def get_variable_features(self, var, index):
|
||||||
return np.hstack([
|
return np.hstack(
|
||||||
self.prices[index],
|
[
|
||||||
self.weights[:, index],
|
self.prices[index],
|
||||||
])
|
self.weights[:, index],
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# def get_variable_category(self, var, index):
|
# def get_variable_category(self, var, index):
|
||||||
# return index
|
# return index
|
||||||
|
|
||||||
|
|
||||||
class MultiKnapsackGenerator:
|
class MultiKnapsackGenerator:
|
||||||
def __init__(self,
|
def __init__(
|
||||||
n=randint(low=100, high=101),
|
self,
|
||||||
m=randint(low=30, high=31),
|
n=randint(low=100, high=101),
|
||||||
w=randint(low=0, high=1000),
|
m=randint(low=30, high=31),
|
||||||
K=randint(low=500, high=500),
|
w=randint(low=0, high=1000),
|
||||||
u=uniform(loc=0.0, scale=1.0),
|
K=randint(low=500, high=500),
|
||||||
alpha=uniform(loc=0.25, scale=0.0),
|
u=uniform(loc=0.0, scale=1.0),
|
||||||
fix_w=False,
|
alpha=uniform(loc=0.25, scale=0.0),
|
||||||
w_jitter=uniform(loc=1.0, scale=0.0),
|
fix_w=False,
|
||||||
round=True,
|
w_jitter=uniform(loc=1.0, scale=0.0),
|
||||||
):
|
round=True,
|
||||||
|
):
|
||||||
"""Initialize the problem generator.
|
"""Initialize the problem generator.
|
||||||
|
|
||||||
Instances have a random number of items (or variables) and a random number of knapsacks
|
Instances have a random number of items (or variables) and a random number of knapsacks
|
||||||
(or constraints), as specified by the provided probability distributions `n` and `m`,
|
        (or constraints), as specified by the provided probability distributions `n` and `m`,
        respectively. The weight of each item `i` on knapsack `j` is sampled independently from
        the provided distribution `w`. The capacity of knapsack `j` is set to:

            alpha_j * sum(w[i,j] for i in range(n)),

        where `alpha_j`, the tightness ratio, is sampled from the provided probability
        distribution `alpha`. To make the instances more challenging, the prices of the items
        are linearly correlated with their average weights. More specifically, the price of each
        item `i` is set to:

            sum(w[i,j]/m for j in range(m)) + K * u_i,

        where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
        from the provided probability distributions. Note that `K` is only sampled once for the
        entire instance.

        If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
        also implies that n and m are kept fixed. Although the prices and capacities are derived
        from w[i,j], as long as u and K are not constants, the generated instances will still not
        be completely identical.

        If a probability distribution w_jitter is provided, then item weights will be set to
        w[i,j] * gamma[i,j], where gamma[i,j] is sampled from w_jitter. When combined with
        fix_w=True, this argument may be used to generate instances where the weight of each item
        is roughly the same, but not exactly identical, across all instances. The prices of the
        items and the capacities of the knapsacks will be calculated as above, but using these
        perturbed weights instead.

        By default, all generated prices, weights and capacities are rounded to the nearest
        integer. If `round=False` is provided, this rounding will be disabled.

        Parameters
        ----------
        n: rv_discrete
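A short usage sketch may help here (illustrative only, not part of this commit; the distribution arguments mirror the ones exercised by `test_knapsack_generator` later in this commit):

from scipy.stats import randint, uniform
from miplearn.problems.knapsack import MultiKnapsackGenerator

# 100 items, 30 knapsacks, tightness ratio fixed at 0.5
gen = MultiKnapsackGenerator(
    n=randint(low=100, high=101),
    m=randint(low=30, high=31),
    w=randint(low=0, high=1000),
    K=randint(low=500, high=501),
    u=uniform(loc=1.0, scale=1.0),
    alpha=uniform(loc=0.50, scale=0.0),
)
instances = gen.generate(100)  # list of MultiKnapsackInstance objects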
@@ -168,11 +178,14 @@ class MultiKnapsackGenerator:
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(
            alpha, rv_frozen
        ), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(
            w_jitter, rv_frozen
        ), "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
@@ -181,7 +194,7 @@ class MultiKnapsackGenerator:
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.round = round

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
@@ -194,7 +207,7 @@ class MultiKnapsackGenerator:
            self.fix_w = None
            self.fix_u = None
            self.fix_K = None

    def generate(self, n_samples):
        def _sample():
            if self.fix_w is not None:
@@ -211,20 +224,22 @@ class MultiKnapsackGenerator:
                K = self.K.rvs()
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackInstance(p, b, w)

        return [_sample() for _ in range(n_samples)]


class KnapsackInstance(Instance):
    """
    Simpler (one-dimensional) Knapsack Problem, used for testing.
    """

    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
@@ -234,23 +249,29 @@ class KnapsackInstance(Instance):
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.OBJ = pe.Objective(
            expr=sum(model.x[v] * self.prices[v] for v in items), sense=pe.maximize
        )
        model.eq_capacity = pe.Constraint(
            expr=sum(model.x[v] * self.weights[v] for v in items) <= self.capacity
        )
        return model

    def get_instance_features(self):
        return np.array(
            [
                self.capacity,
                np.average(self.weights),
            ]
        )

    def get_variable_features(self, var, index):
        return np.array(
            [
                self.weights[index],
                self.prices[index],
            ]
        )


class GurobiKnapsackInstance(KnapsackInstance):
@@ -258,6 +279,7 @@ class GurobiKnapsackInstance(KnapsackInstance):
    Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
    instead of Pyomo, used for testing.
    """

    def __init__(self, weights, prices, capacity):
        super().__init__(weights, prices, capacity)

@@ -268,9 +290,11 @@ class GurobiKnapsackInstance(KnapsackInstance):
        model = gp.Model("Knapsack")
        n = len(self.weights)
        x = model.addVars(n, vtype=GRB.BINARY, name="x")
        model.addConstr(
            gp.quicksum(x[i] * self.weights[i] for i in range(n)) <= self.capacity,
            "eq_capacity",
        )
        model.setObjective(
            gp.quicksum(x[i] * self.prices[i] for i in range(n)), GRB.MAXIMIZE
        )
        return model
@@ -12,44 +12,49 @@ from scipy.stats.distributions import rv_frozen


class ChallengeA:
    def __init__(
        self,
        seed=42,
        n_training_instances=500,
        n_test_instances=50,
    ):

        np.random.seed(seed)
        self.generator = MaxWeightStableSetGenerator(
            w=uniform(loc=100.0, scale=50.0),
            n=randint(low=200, high=201),
            p=uniform(loc=0.05, scale=0.0),
            fix_graph=True,
        )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class MaxWeightStableSetGenerator:
    """Random instance generator for the Maximum-Weight Stable Set Problem.

    The generator has two modes of operation. When `fix_graph=True` is provided, one random
    Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled
    from user-provided probability distributions `n` and `p`. To generate each instance, the
    generator independently samples each $w_v$ from the user-provided probability distribution `w`.

    When `fix_graph=False`, a new random graph is generated for each instance; the remaining
    parameters are sampled in the same way.
    """

    def __init__(
        self,
        w=uniform(loc=10.0, scale=1.0),
        n=randint(low=250, high=251),
        p=uniform(loc=0.05, scale=0.0),
        fix_graph=True,
    ):
        """Initialize the problem generator.

        Parameters
        ----------
        w: rv_continuous
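For illustration (not part of this commit), a generator with a fixed 10-node graph can be constructed exactly as in `test_stab_generator_fixed_graph` later in this commit:

from scipy.stats import randint, uniform
from miplearn.problems.stab import MaxWeightStableSetGenerator

# Fixed Erdős-Rényi graph G(10, 0.05); only the weights vary per instance
gen = MaxWeightStableSetGenerator(
    w=uniform(loc=50.0, scale=10.0),
    n=randint(low=10, high=11),
    p=uniform(loc=0.05, scale=0.0),
    fix_graph=True,
)
instances = gen.generate(1_000)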
@@ -69,7 +74,7 @@ class MaxWeightStableSetGenerator:
        self.graph = None
        if fix_graph:
            self.graph = self._generate_graph()

    def generate(self, n_samples):
        def _sample():
            if self.graph is not None:
@@ -78,22 +83,23 @@ class MaxWeightStableSetGenerator:
                graph = self._generate_graph()
            weights = self.w.rvs(graph.number_of_nodes())
            return MaxWeightStableSetInstance(graph, weights)

        return [_sample() for _ in range(n_samples)]

    def _generate_graph(self):
        return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())


class MaxWeightStableSetInstance(Instance):
    """An instance of the Maximum-Weight Stable Set Problem.

    Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
    set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
    a subset of vertices, no two of which are adjacent.

    This is one of Karp's 21 NP-complete problems.
    """

    def __init__(self, graph, weights):
        self.graph = graph
        self.weights = weights
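As a concrete example (mirroring `test_stab` later in this commit; the `LearningSolver` import path is assumed), a 5-cycle with unit weights has a maximum-weight stable set of size 2:

import networkx as nx
from miplearn import LearningSolver  # import path assumed
from miplearn.problems.stab import MaxWeightStableSetInstance

instance = MaxWeightStableSetInstance(nx.cycle_graph(5), [1.0] * 5)
solver = LearningSolver()
solver.solve(instance)
assert instance.lower_bound == 2.0  # at most 2 non-adjacent vertices in C5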
@@ -102,13 +108,14 @@ class MaxWeightStableSetInstance(Instance):
        nodes = list(self.graph.nodes)
        model = pe.ConcreteModel()
        model.x = pe.Var(nodes, domain=pe.Binary)
        model.OBJ = pe.Objective(
            expr=sum(model.x[v] * self.weights[v] for v in nodes), sense=pe.maximize
        )
        model.clique_eqs = pe.ConstraintList()
        for clique in nx.find_cliques(self.graph):
            model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
        return model

    def get_instance_features(self):
        return np.ones(0)

@@ -1,4 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

@@ -9,17 +9,18 @@ import numpy as np


def test_knapsack_generator():
    gen = MultiKnapsackGenerator(
        n=randint(low=100, high=101),
        m=randint(low=30, high=31),
        w=randint(low=0, high=1000),
        K=randint(low=500, high=501),
        u=uniform(loc=1.0, scale=1.0),
        alpha=uniform(loc=0.50, scale=0.0),
    )
    instances = gen.generate(100)
    w_sum = sum(instance.weights for instance in instances) / len(instances)
    p_sum = sum(instance.prices for instance in instances) / len(instances)
    b_sum = sum(instance.capacities for instance in instances) / len(instances)
    assert round(np.mean(w_sum), -1) == 500.0
    # assert round(np.mean(p_sum), -1) == 1200. # flaky
    assert round(np.mean(b_sum), -3) == 25000.0
@@ -11,36 +11,42 @@ from scipy.stats import uniform, randint


def test_stab():
    graph = nx.cycle_graph(5)
    weights = [1.0, 1.0, 1.0, 1.0, 1.0]
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    solver.solve(instance)
    assert instance.lower_bound == 2.0


def test_stab_generator_fixed_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator

    gen = MaxWeightStableSetGenerator(
        w=uniform(loc=50.0, scale=10.0),
        n=randint(low=10, high=11),
        p=uniform(loc=0.05, scale=0.0),
        fix_graph=True,
    )
    instances = gen.generate(1_000)
    weights = np.array([instance.weights for instance in instances])
    weights_avg_actual = np.round(np.average(weights, axis=0))
    weights_avg_expected = [55.0] * 10
    assert list(weights_avg_actual) == weights_avg_expected


def test_stab_generator_random_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator

    gen = MaxWeightStableSetGenerator(
        w=uniform(loc=50.0, scale=10.0),
        n=randint(low=30, high=41),
        p=uniform(loc=0.5, scale=0.0),
        fix_graph=False,
    )
    instances = gen.generate(1_000)
    n_nodes = [instance.graph.number_of_nodes() for instance in instances]
    n_edges = [instance.graph.number_of_edges() for instance in instances]
    assert np.round(np.mean(n_nodes)) == 35.0
    assert np.round(np.mean(n_edges), -1) == 300.0
@@ -11,11 +11,13 @@ from scipy.stats import uniform, randint


def test_generator():
    instances = TravelingSalesmanGenerator(
        x=uniform(loc=0.0, scale=1000.0),
        y=uniform(loc=0.0, scale=1000.0),
        n=randint(low=100, high=101),
        gamma=uniform(loc=0.95, scale=0.1),
        fix_cities=True,
    ).generate(100)
    assert len(instances) == 100
    assert instances[0].n_cities == 100
    assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
@@ -25,14 +27,16 @@ def test_generator():


def test_instance():
    n_cities = 4
    distances = np.array(
        [
            [0.0, 1.0, 2.0, 1.0],
            [1.0, 0.0, 1.0, 2.0],
            [2.0, 1.0, 0.0, 1.0],
            [1.0, 2.0, 1.0, 0.0],
        ]
    )
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ["gurobi"]:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
@@ -48,17 +52,19 @@ def test_instance():


def test_subtour():
    n_cities = 6
    cities = np.array(
        [
            [0.0, 0.0],
            [1.0, 0.0],
            [2.0, 0.0],
            [3.0, 0.0],
            [0.0, 1.0],
            [3.0, 1.0],
        ]
    )
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ["gurobi"]:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        assert hasattr(instance, "found_violated_lazy_constraints")
@@ -13,41 +13,44 @@ import random


class ChallengeA:
    def __init__(
        self,
        seed=42,
        n_training_instances=500,
        n_test_instances=50,
    ):

        np.random.seed(seed)
        self.generator = TravelingSalesmanGenerator(
            x=uniform(loc=0.0, scale=1000.0),
            y=uniform(loc=0.0, scale=1000.0),
            n=randint(low=350, high=351),
            gamma=uniform(loc=0.95, scale=0.1),
            fix_cities=True,
            round=True,
        )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class TravelingSalesmanGenerator:
    """Random generator for the Traveling Salesman Problem."""

    def __init__(
        self,
        x=uniform(loc=0.0, scale=1000.0),
        y=uniform(loc=0.0, scale=1000.0),
        n=randint(low=100, high=101),
        gamma=uniform(loc=1.0, scale=0.0),
        fix_cities=True,
        round=True,
    ):
        """Initializes the problem generator.

        Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n, x_i and y_i are
        sampled independently from the provided probability distributions `n`, `x` and `y`. For each
        (unordered) pair of cities (i,j), the distance d[i,j] between them is set to:
@@ -58,7 +61,7 @@ class TravelingSalesmanGenerator:

        If fix_cities=True, the list of cities is kept the same for all generated instances. The
        gamma values, and therefore also the distances, are still different.

        By default, all distances d[i,j] are rounded to the nearest integer. If `round=False`
        is provided, this rounding will be disabled.

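A usage sketch (illustrative only; it mirrors `test_generator` earlier in this commit, and the module path is assumed):

from scipy.stats import randint, uniform
from miplearn.problems.tsp import TravelingSalesmanGenerator  # path assumed

# 100 fixed cities in a 1000 x 1000 box; per-instance distances are
# perturbed by gamma ~ U(0.95, 1.05)
gen = TravelingSalesmanGenerator(
    x=uniform(loc=0.0, scale=1000.0),
    y=uniform(loc=0.0, scale=1000.0),
    n=randint(low=100, high=101),
    gamma=uniform(loc=0.95, scale=0.1),
    fix_cities=True,
)
instances = gen.generate(100)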
@@ -79,19 +82,22 @@ class TravelingSalesmanGenerator:
        assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
        assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(
            gamma,
            rv_frozen,
        ), "gamma should be a SciPy probability distribution"
        self.x = x
        self.y = y
        self.n = n
        self.gamma = gamma
        self.round = round

        if fix_cities:
            self.fixed_n, self.fixed_cities = self._generate_cities()
        else:
            self.fixed_n = None
            self.fixed_cities = None

    def generate(self, n_samples):
        def _sample():
            if self.fixed_cities is not None:
@@ -103,54 +109,62 @@ class TravelingSalesmanGenerator:
            if self.round:
                distances = distances.round()
            return TravelingSalesmanInstance(n, distances)

        return [_sample() for _ in range(n_samples)]

    def _generate_cities(self):
        n = self.n.rvs()
        cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
        return n, cities


class TravelingSalesmanInstance(Instance):
    """An instance of the Traveling Salesman Problem.

    Given a list of cities and the distance between each pair of cities, the problem asks for the
    shortest route starting at the first city, visiting each other city exactly once, then
    returning to the first city. This problem is a generalization of the Hamiltonian path problem,
    one of Karp's 21 NP-complete problems.
    """
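For example (a sketch based on `test_instance` earlier in this commit; the `LearningSolver` import path is assumed), a symmetric 4-city instance whose optimal tour 0-1-2-3-0 has length 4:

import numpy as np
from miplearn import LearningSolver  # import path assumed
from miplearn.problems.tsp import TravelingSalesmanInstance  # path assumed

distances = np.array(
    [
        [0.0, 1.0, 2.0, 1.0],
        [1.0, 0.0, 1.0, 2.0],
        [2.0, 1.0, 0.0, 1.0],
        [1.0, 2.0, 1.0, 0.0],
    ]
)
instance = TravelingSalesmanInstance(4, distances)
LearningSolver(solver="gurobi").solve(instance)
# instance.solution["x"] now holds the selected edges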

    def __init__(self, n_cities, distances):
        assert isinstance(distances, np.ndarray)
        assert distances.shape == (n_cities, n_cities)
        self.n_cities = n_cities
        self.distances = distances

    def to_model(self):
        model = pe.ConcreteModel()
        model.edges = edges = [
            (i, j) for i in range(self.n_cities) for j in range(i + 1, self.n_cities)
        ]
        model.x = pe.Var(edges, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(model.x[i, j] * self.distances[i, j] for (i, j) in edges),
            sense=pe.minimize,
        )
        model.eq_degree = pe.ConstraintList()
        model.eq_subtour = pe.ConstraintList()
        for i in range(self.n_cities):
            model.eq_degree.add(
                sum(
                    model.x[min(i, j), max(i, j)]
                    for j in range(self.n_cities)
                    if i != j
                )
                == 2
            )
        return model

    def get_instance_features(self):
        return np.array([1])

    def get_variable_features(self, var_name, index):
        return np.array([1])

    def get_variable_category(self, var_name, index):
        return index

    def find_violated_lazy_constraints(self, model):
        selected_edges = [e for e in model.edges if model.x[e].value > 0.5]
        graph = nx.Graph()
@@ -161,15 +175,18 @@ class TravelingSalesmanInstance(Instance):
            if len(c) < self.n_cities:
                violations += [c]
        return violations

    def build_lazy_constraint(self, model, component):
        cut_edges = [
            e
            for e in model.edges
            if (e[0] in component and e[1] not in component)
            or (e[0] not in component and e[1] in component)
        ]
        return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)

    def find_violated_user_cuts(self, model):
        return self.find_violated_lazy_constraints(model)

    def build_user_cut(self, model, violation):
        return self.build_lazy_constraint(model, violation)
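For reference, each undersized component `c` found above induces the cut-set inequality sum(x[e] for e crossing c) >= 2, since any tour must enter and leave the component at least once; this is exactly the constraint that `build_lazy_constraint` adds. A minimal sketch of how these hooks are exercised (mirroring `test_subtour` earlier in this commit; Gurobi backend assumed):

# `instance` below is the two-cluster city layout from test_subtour,
# whose degree-constrained relaxation contains two disjoint 3-cycles.
solver = LearningSolver(solver="gurobi")
solver.solve(instance)  # triggers find_violated_lazy_constraints
assert hasattr(instance, "found_violated_lazy_constraints")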
@@ -13,10 +13,11 @@ logger = logging.getLogger(__name__)


class GurobiSolver(InternalSolver):
    def __init__(
        self,
        params=None,
        lazy_cb_frequency=1,
    ):
        """
        An InternalSolver backed by Gurobi's Python API (without Pyomo).

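A usage sketch (illustrative only; it mirrors `test_internal_solver_warm_starts` later in this commit):

solver = GurobiSolver()
solver.set_instance(instance, instance.to_model())
solver.set_warm_start({"x": {0: 1.0, 1: 0.0, 2: 0.0, 3: 1.0}})
stats = solver.solve(tee=True)  # stats includes "Warm start value"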
@@ -33,6 +34,7 @@ class GurobiSolver(InternalSolver):
        if params is None:
            params = {}
        from gurobipy import GRB

        self.GRB = GRB
        self.instance = None
        self.model = None
@@ -44,8 +46,7 @@ class GurobiSolver(InternalSolver):
        if lazy_cb_frequency == 1:
            self.lazy_cb_where = [self.GRB.Callback.MIPSOL]
        else:
            self.lazy_cb_where = [self.GRB.Callback.MIPSOL, self.GRB.Callback.MIPNODE]

    def set_instance(self, instance, model=None):
        self._raise_if_callback()
@@ -70,14 +71,15 @@ class GurobiSolver(InternalSolver):
                idx = [0]
            else:
                name = m.group(1)
                idx = tuple(
                    int(k) if k.isdecimal() else k for k in m.group(2).split(",")
                )
                if len(idx) == 1:
                    idx = idx[0]
            if name not in self._all_vars:
                self._all_vars[name] = {}
            self._all_vars[name][idx] = var
            if var.vtype != "C":
                if name not in self._bin_vars:
                    self._bin_vars[name] = {}
                self._bin_vars[name][idx] = var
@@ -103,15 +105,9 @@ class GurobiSolver(InternalSolver):
            for (idx, var) in vardict.items():
                var.vtype = self.GRB.BINARY
        log = streams[0].getvalue()
        return {"Optimal value": self.model.objVal, "Log": log}

    def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
        self._raise_if_callback()

        def cb_wrapper(cb_model, cb_where):
@@ -133,7 +129,7 @@ class GurobiSolver(InternalSolver):
        if tee:
            streams += [sys.stdout]
        if iteration_cb is None:
            iteration_cb = lambda: False
        while True:
            logger.debug("Solving MIP...")
            with RedirectOutput(streams):
@@ -187,7 +183,9 @@ class GurobiSolver(InternalSolver):
        elif self.cb_where is None:
            return var.x
        else:
            raise Exception(
                "get_value cannot be called from cb_where=%s" % self.cb_where
            )

    def get_variables(self):
        self._raise_if_callback()
@@ -220,8 +218,10 @@ class GurobiSolver(InternalSolver):
                if value is not None:
                    count_fixed += 1
                    self._all_vars[varname][idx].start = value
        logger.info(
            "Setting start values for %d variables (out of %d)"
            % (count_fixed, count_total)
        )

    def clear_warm_start(self):
        self._raise_if_callback()
@@ -248,10 +248,7 @@ class GurobiSolver(InternalSolver):
    def extract_constraint(self, cid):
        self._raise_if_callback()
        constr = self.model.getConstrByName(cid)
        cobj = (self.model.getRow(constr), constr.sense, constr.RHS, constr.ConstrName)
        self.model.remove(constr)
        return cobj

@@ -316,7 +313,7 @@ class GurobiSolver(InternalSolver):
            value = matches[0]
        return value

    def __getstate__(self):
        return {
            "params": self.params,
            "lazy_cb_where": self.lazy_cb_where,
@@ -324,6 +321,7 @@ class GurobiSolver(InternalSolver):

    def __setstate__(self, state):
        from gurobipy import GRB

        self.params = state["params"]
        self.lazy_cb_where = state["lazy_cb_where"]
        self.GRB = GRB
@@ -331,4 +329,4 @@ class GurobiSolver(InternalSolver):
        self.model = None
        self._all_vars = None
        self._bin_vars = None
        self.cb_where = None
@@ -222,4 +222,3 @@ class InternalSolver(ABC):
            for idx in indices:
                solution[var][idx] = 0.0
        return solution

@@ -12,10 +12,12 @@ from copy import deepcopy
from typing import Optional, List
from p_tqdm import p_map

from .. import (
    ObjectiveValueComponent,
    PrimalSolutionComponent,
    DynamicLazyConstraintsComponent,
    UserCutsComponent,
)
from .pyomo.cplex import CplexPyomoSolver
from .pyomo.gurobi import GurobiPyomoSolver

@@ -43,16 +45,18 @@ def _parallel_solve(idx):


class LearningSolver:
    def __init__(
        self,
        components=None,
        gap_tolerance=1e-4,
        mode="exact",
        solver="gurobi",
        threads=None,
        time_limit=None,
        node_limit=None,
        solve_lp_first=True,
        use_lazy_cb=False,
    ):
        """
        Mixed-Integer Linear Programming (MIP) solver that extracts information
        from previous runs and uses Machine Learning methods to accelerate the
@@ -142,28 +146,30 @@ class LearningSolver:
            solver.set_node_limit(self.node_limit)
        return solver

    def solve(
        self,
        instance,
        model=None,
        output="",
        tee=False,
    ):
        """
        Solves the given instance. If trained machine-learning models are
        available, they will be used to accelerate the solution process.

        The argument `instance` may be either an Instance object or a
        filename pointing to a pickled Instance object.

        This method modifies the instance object. Specifically, the following
        properties are set:

        - instance.lp_solution
        - instance.lp_value
        - instance.lower_bound
        - instance.upper_bound
        - instance.solution
        - instance.solver_log

        Additional solver components may set additional properties. Please
        see their documentation for more details. If a filename is provided,
        then the file is modified in-place. That is, the original file is
@@ -197,7 +203,7 @@ class LearningSolver:
        "Predicted UB". See the documentation of each component for more
        details.
        """

        filename = None
        fileformat = None
        if isinstance(instance, str):
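For example (a sketch assembled from the tests in this commit, not part of the diff itself):

solver = LearningSolver(solver="gurobi", time_limit=300, gap_tolerance=1e-3)
solver.solve(instance)
print(instance.lower_bound, instance.upper_bound)
print(instance.solution["x"][0])  # values indexed by variable name and index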
@@ -211,7 +217,7 @@ class LearningSolver:
            fileformat = "pickle"
            with open(filename, "rb") as file:
                instance = pickle.load(file)

        if model is None:
            model = instance.to_model()

@@ -248,9 +254,11 @@ class LearningSolver:
            lazy_cb = lazy_cb_wrapper

        logger.info("Solving MILP...")
        results = self.internal_solver.solve(
            tee=tee,
            iteration_cb=iteration_cb,
            lazy_cb=lazy_cb,
        )
        results["LP value"] = instance.lp_value

        # Read MIP solution and bounds
@@ -262,7 +270,7 @@ class LearningSolver:
        logger.debug("Calling after_solve callbacks...")
        for component in self.components.values():
            component.after_solve(self, instance, model, results)

        if filename is not None and output is not None:
            output_filename = output
            if len(output) == 0:
@@ -280,36 +288,38 @@ class LearningSolver:
    def parallel_solve(self, instances, n_jobs=4, label="Solve", output=[]):
        """
        Solves multiple instances in parallel.

        This method is equivalent to calling `solve` for each item on the list,
        but it processes multiple instances at the same time. Like `solve`, this
        method modifies each instance in place. Also like `solve`, a list of
        filenames may be provided.

        Parameters
        ----------
        instances: [miplearn.Instance] or [str]
            The instances to be solved
        n_jobs: int
            Number of instances to solve in parallel at a time.

        Returns
        -------
        Returns a list of dictionaries, with one entry for each provided instance.
        Each dictionary is the same one you would obtain by calling:

            [solver.solve(p) for p in instances]

        """
        self.internal_solver = None
        self._silence_miplearn_logger()
        SOLVER[0] = self
        OUTPUTS[0] = output
        INSTANCES[0] = instances
        results = p_map(
            _parallel_solve,
            list(range(len(instances))),
            num_cpus=n_jobs,
            desc=label,
        )
        stats = []
        for (idx, (s, instance)) in enumerate(results):
            stats.append(s)
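For example (a sketch; `filenames` is a list of pickled-instance paths, as in `test_solve_fit_from_disk` later in this commit):

solver = LearningSolver(solver="gurobi")
output = [f + ".out" for f in filenames]  # write results to new files
stats = solver.parallel_solve(filenames, n_jobs=4, output=output)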
@@ -330,12 +340,12 @@ class LearningSolver:
    def _silence_miplearn_logger(self):
        miplearn_logger = logging.getLogger("miplearn")
        self.prev_log_level = miplearn_logger.getEffectiveLevel()
        miplearn_logger.setLevel(logging.WARNING)

    def _restore_miplearn_logger(self):
        miplearn_logger = logging.getLogger("miplearn")
        miplearn_logger.setLevel(self.prev_log_level)

    def __getstate__(self):
        self.internal_solver = None
        return self.__dict__
@@ -81,8 +81,10 @@ class BasePyomoSolver(InternalSolver):
                count_fixed += 1
        if count_fixed > 0:
            self._is_warm_start_available = True
        logger.info(
            "Setting start values for %d variables (out of %d)"
            % (count_fixed, count_total)
        )

    def clear_warm_start(self):
        for var in self._all_vars:
@@ -134,17 +136,19 @@ class BasePyomoSolver(InternalSolver):
                count_fixed += 1
                var[index].fix(solution[varname][index])
                self._pyomo_solver.update_var(var[index])
        logger.info(
            "Fixing values for %d variables (out of %d)"
            % (
                count_fixed,
                count_total,
            )
        )

    def add_constraint(self, constraint):
        self._pyomo_solver.add_constraint(constraint)
        self._update_constrs()

    def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
        if lazy_cb is not None:
            raise Exception("lazy callback not supported")
        total_wallclock_time = 0
@@ -158,8 +162,10 @@ class BasePyomoSolver(InternalSolver):
        while True:
            logger.debug("Solving MIP...")
            with RedirectOutput(streams):
                results = self._pyomo_solver.solve(
                    tee=True,
                    warmstart=self._is_warm_start_available,
                )
            total_wallclock_time += results["Solver"][0]["Wallclock time"]
            should_repeat = iteration_cb()
            if not should_repeat:
@@ -192,9 +198,7 @@ class BasePyomoSolver(InternalSolver):
        return value

    def _extract_node_count(self, log):
        return int(self.__extract(log, self._get_node_count_regexp(), default=1))

    def set_threads(self, threads):
        key = self._get_threads_option_name()
@@ -249,4 +253,4 @@ class BasePyomoSolver(InternalSolver):
        raise Exception("not implemented")

    def get_constraint_slacks(self):
        raise Exception("not implemented")
@@ -20,7 +20,7 @@ class CplexPyomoSolver(BasePyomoSolver):
        {"mip_display": 5} to increase the log verbosity.
        """
        super().__init__()
        self._pyomo_solver = pe.SolverFactory("cplex_persistent")
        self._pyomo_solver.options["randomseed"] = randint(low=0, high=1000).rvs()
        self._pyomo_solver.options["mip_display"] = 4
        if options is not None:
@@ -15,8 +15,7 @@ logger = logging.getLogger(__name__)


class GurobiPyomoSolver(BasePyomoSolver):
    def __init__(self, options=None):
        """
        Creates a new Gurobi solver, accessed through Pyomo.

@@ -27,7 +26,7 @@ class GurobiPyomoSolver(BasePyomoSolver):
        {"Threads": 4} to set the number of threads.
        """
        super().__init__()
        self._pyomo_solver = pe.SolverFactory("gurobi_persistent")
        self._pyomo_solver.options["Seed"] = randint(low=0, high=1000).rvs()
        if options is not None:
            for (key, value) in options.items():
@@ -56,6 +55,7 @@ class GurobiPyomoSolver(BasePyomoSolver):

    def set_branching_priorities(self, priorities):
        from gurobipy import GRB

        for varname in priorities.keys():
            var = self._varname_to_var[varname]
            for (index, priority) in priorities[varname].items():
@@ -9,20 +9,22 @@ from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance

def _get_instance(solver):
    def _is_subclass_or_instance(solver, parentClass):
        return isinstance(solver, parentClass) or (
            isclass(solver) and issubclass(solver, parentClass)
        )

    if _is_subclass_or_instance(solver, BasePyomoSolver):
        return KnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )

    if _is_subclass_or_instance(solver, GurobiSolver):
        return GurobiKnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )

    assert False
@@ -16,6 +16,7 @@ logger = logging.getLogger(__name__)

def test_redirect_output():
    import sys

    original_stdout = sys.stdout
    io = StringIO()
    with RedirectOutput([io]):
@@ -31,36 +32,42 @@ def test_internal_solver_warm_starts():
        model = instance.to_model()
        solver = solver_class()
        solver.set_instance(instance, model)
        solver.set_warm_start(
            {
                "x": {
                    0: 1.0,
                    1: 0.0,
                    2: 0.0,
                    3: 1.0,
                }
            }
        )
        stats = solver.solve(tee=True)
        assert stats["Warm start value"] == 725.0

        solver.set_warm_start(
            {
                "x": {
                    0: 1.0,
                    1: 1.0,
                    2: 1.0,
                    3: 1.0,
                }
            }
        )
        stats = solver.solve(tee=True)
        assert stats["Warm start value"] is None

        solver.fix(
            {
                "x": {
                    0: 1.0,
                    1: 0.0,
                    2: 0.0,
                    3: 1.0,
                }
            }
        )
        stats = solver.solve(tee=True)
        assert stats["Lower bound"] == 725.0
        assert stats["Upper bound"] == 725.0
@@ -20,11 +20,13 @@ def test_learning_solver():
        for internal_solver in _get_internal_solvers():
            logger.info("Solver: %s" % internal_solver)
            instance = _get_instance(internal_solver)
            solver = LearningSolver(
                time_limit=300,
                gap_tolerance=1e-3,
                threads=1,
                solver=internal_solver,
                mode=mode,
            )

            solver.solve(instance)
            assert instance.solution["x"][0] == 1.0
@@ -74,37 +76,36 @@ def test_solve_fit_from_disk():
        filenames = []
        for k in range(3):
            instance = _get_instance(internal_solver)
-            with tempfile.NamedTemporaryFile(suffix=".pkl",
-                                             delete=False) as file:
+            with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as file:
                filenames += [file.name]
                pickle.dump(instance, file)

        # Test: solve
        solver = LearningSolver(solver=internal_solver)
        solver.solve(filenames[0])
        with open(filenames[0], "rb") as file:
            instance = pickle.load(file)
            assert hasattr(instance, "solution")

        # Test: parallel_solve
        solver.parallel_solve(filenames)
        for filename in filenames:
            with open(filename, "rb") as file:
                instance = pickle.load(file)
                assert hasattr(instance, "solution")

        # Test: solve (with specified output)
        output = [f + ".out" for f in filenames]
        solver.solve(filenames[0], output=output[0])
        assert os.path.isfile(output[0])

        # Test: parallel_solve (with specified output)
        solver.parallel_solve(filenames, output=output)
        for filename in output:
            assert os.path.isfile(filename)

        # Delete temporary files
        for filename in filenames:
            os.remove(filename)
        for filename in output:
            os.remove(filename)
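For readers following the test logic rather than the formatting: the pattern above relies on `delete=False` so the temporary file's path remains valid after the `with` block closes it, letting the test reload the pickled instance later and remove the file by hand. The round-trip in isolation (a standalone sketch, not the MIPLearn test itself):

    import os
    import pickle
    import tempfile

    obj = {"x": [1.0, 0.0]}  # stand-in for a pickled problem instance
    with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as file:
        pickle.dump(obj, file)
        path = file.name  # delete=False keeps the file around after close

    with open(path, "rb") as file:
        assert pickle.load(file) == obj
    os.remove(path)  # manual cleanup, as in the test's last step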
@@ -8,14 +8,14 @@ from miplearn.problems.knapsack import KnapsackInstance
def get_test_pyomo_instances():
    instances = [
        KnapsackInstance(
-            weights=[23., 26., 20., 18.],
-            prices=[505., 352., 458., 220.],
-            capacity=67.,
+            weights=[23.0, 26.0, 20.0, 18.0],
+            prices=[505.0, 352.0, 458.0, 220.0],
+            capacity=67.0,
        ),
        KnapsackInstance(
-            weights=[25., 30., 22., 18.],
-            prices=[500., 365., 420., 150.],
-            capacity=70.,
+            weights=[25.0, 30.0, 22.0, 18.0],
+            prices=[500.0, 365.0, 420.0, 150.0],
+            capacity=70.0,
        ),
    ]
    models = [instance.to_model() for instance in instances]
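Most of the churn in these fixtures is Black's numeric-literal normalization: a float written with a bare trailing dot, such as `23.`, is rewritten with an explicit zero, `23.0`. The change is purely lexical; the values are identical:

    assert 23. == 23.0    # same float, clearer spelling
    assert 505. == 505.0  # Black writes the right-hand form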
@@ -11,8 +11,9 @@ from scipy.stats import randint

def test_benchmark():
    # Generate training and test instances
-    train_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(5)
-    test_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(3)
+    generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
+    train_instances = generator.generate(5)
+    test_instances = generator.generate(3)

    # Training phase...
    training_solver = LearningSolver()
@@ -26,11 +27,11 @@ def test_benchmark():
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.raw_results().values.shape == (12,16)
+    assert benchmark.raw_results().values.shape == (12, 16)

    benchmark.save_results("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")

    benchmark = BenchmarkRunner(test_solvers)
    benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (12,16)
+    assert benchmark.raw_results().values.shape == (12, 16)
@@ -3,25 +3,28 @@
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.problems.knapsack import KnapsackInstance
-from miplearn import (LearningSolver,
-                      SolutionExtractor,
-                      InstanceFeaturesExtractor,
-                      VariableFeaturesExtractor,
-                      )
+from miplearn import (
+    LearningSolver,
+    SolutionExtractor,
+    InstanceFeaturesExtractor,
+    VariableFeaturesExtractor,
+)
import numpy as np
import pyomo.environ as pe


def _get_instances():
    instances = [
-        KnapsackInstance(weights=[1., 2., 3.],
-                         prices=[10., 20., 30.],
-                         capacity=2.5,
-                         ),
-        KnapsackInstance(weights=[3., 4., 5.],
-                         prices=[20., 30., 40.],
-                         capacity=4.5,
-                         ),
+        KnapsackInstance(
+            weights=[1.0, 2.0, 3.0],
+            prices=[10.0, 20.0, 30.0],
+            capacity=2.5,
+        ),
+        KnapsackInstance(
+            weights=[3.0, 4.0, 5.0],
+            prices=[20.0, 30.0, 40.0],
+            capacity=4.5,
+        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
@@ -38,25 +41,30 @@ def test_solution_extractor():
    assert isinstance(features["default"], np.ndarray)
    assert features["default"].shape == (6, 2)
    assert features["default"].ravel().tolist() == [
-        1., 0.,
-        0., 1.,
-        1., 0.,
-        1., 0.,
-        0., 1.,
-        1., 0.,
+        1.0,
+        0.0,
+        0.0,
+        1.0,
+        1.0,
+        0.0,
+        1.0,
+        0.0,
+        0.0,
+        1.0,
+        1.0,
+        0.0,
    ]


def test_instance_features_extractor():
    instances, models = _get_instances()
    features = InstanceFeaturesExtractor().extract(instances)
-    assert features.shape == (2,3)
+    assert features.shape == (2, 3)


def test_variable_features_extractor():
    instances, models = _get_instances()
    features = VariableFeaturesExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features
-    assert features["default"].shape == (6,5)
+    assert features["default"].shape == (6, 5)
3
pyproject.toml
Normal file
@@ -0,0 +1,3 @@
+[tool.black]
+py36 = true
+include = '\.pyi?$'
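This new pyproject.toml is what lets a bare `black miplearn` (or the Makefile's `reformat` target) pick up project-wide settings: `py36 = true` tells the 20.8b1-era Black it may emit Python 3.6+ syntax, and `include = '\.pyi?$'` restricts formatting to `.py` and `.pyi` files. Later Black releases deprecate the boolean `py36` flag; a hypothetical equivalent for a newer Black (not part of this commit) would be:

    [tool.black]
    target-version = ["py36"]  # successor of the deprecated py36 flag
    include = '\.pyi?$'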
@@ -12,3 +12,5 @@ python-markdown-math~=0.8
seaborn~=0.11
scikit-learn~=0.23
tqdm~=4.54
+black==20.8b1
+pre-commit~=2.9
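With these pins in place alongside the .pre-commit-config.yaml and Lint workflow added above, a contributor's setup is a one-time hook installation; afterwards every `git commit` runs Black in `--check` mode locally, mirroring what CI enforces. Assuming the pins live in a top-level requirements.txt (the diff does not name the file), the steps would be:

    pip install -r requirements.txt   # installs black==20.8b1 and pre-commit
    pre-commit install                # registers the git hook in this clone
    pre-commit run --all-files        # same Black --check that CI runs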