Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 09:28:51 -06:00)
Reformat source code with Black; add pre-commit hooks and CI checks
.github/workflows/lint.yml (new file, +11)
@@ -0,0 +1,11 @@
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - uses: psf/black@stable
.pre-commit-config.yaml (new file, +6)
@@ -0,0 +1,6 @@
+repos:
+  - repo: https://github.com/ambv/black
+    rev: stable
+    hooks:
+      - id: black
+        args: ["--check"]
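The two files above wire Black into both local development and CI: the pre-commit hook and the GitHub Actions job both run Black in --check mode, which fails without modifying anything when a file is not Black-formatted. A minimal local equivalent (a sketch, assuming Black and pre-commit are installed in the environment, e.g. via pip install black pre-commit):

    import subprocess

    # Black's --check flag (the same flag the hook passes above) reports files
    # that would be reformatted and exits non-zero, which is what fails the build.
    result = subprocess.run(["black", "--check", "miplearn"])
    print("needs reformatting" if result.returncode != 0 else "already formatted")

Running pre-commit install once in a clone makes the same check run automatically before every commit.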
Makefile (+3)
@@ -34,6 +34,9 @@ install:
 uninstall:
 	$(PIP) uninstall miplearn
 
+reformat:
+	$(PYTHON) -m black miplearn
+
 test:
 	$(PYTEST) $(PYTEST_ARGS)
 
@@ -2,10 +2,12 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
-from .extractors import (SolutionExtractor,
-                         InstanceFeaturesExtractor,
-                         ObjectiveValueExtractor,
-                         VariableFeaturesExtractor)
+from .extractors import (
+    SolutionExtractor,
+    InstanceFeaturesExtractor,
+    ObjectiveValueExtractor,
+    VariableFeaturesExtractor,
+)
 
 from .components.component import Component
 from .components.objective import ObjectiveValueComponent
@@ -24,27 +24,37 @@ class BenchmarkRunner:
         for (solver_name, solver) in self.solvers.items():
             for i in tqdm(range(len((instances)))):
                 results = solver.solve(deepcopy(instances[i]), tee=tee)
-                self._push_result(results, solver=solver, solver_name=solver_name, instance=i)
+                self._push_result(
+                    results,
+                    solver=solver,
+                    solver_name=solver_name,
+                    instance=i,
+                )
 
-    def parallel_solve(self,
-                       instances,
-                       n_jobs=1,
-                       n_trials=1,
-                       index_offset=0,
-                       ):
+    def parallel_solve(
+        self,
+        instances,
+        n_jobs=1,
+        n_trials=1,
+        index_offset=0,
+    ):
         self._silence_miplearn_logger()
         trials = instances * n_trials
         for (solver_name, solver) in self.solvers.items():
-            results = solver.parallel_solve(trials,
-                                            n_jobs=n_jobs,
-                                            label="Solve (%s)" % solver_name,
-                                            output=None)
+            results = solver.parallel_solve(
+                trials,
+                n_jobs=n_jobs,
+                label="Solve (%s)" % solver_name,
+                output=None,
+            )
             for i in range(len(trials)):
                 idx = (i % len(instances)) + index_offset
-                self._push_result(results[i],
-                                  solver=solver,
-                                  solver_name=solver_name,
-                                  instance=idx)
+                self._push_result(
+                    results[i],
+                    solver=solver,
+                    solver_name=solver_name,
+                    instance=idx,
+                )
         self._restore_miplearn_logger()
 
     def raw_results(self):
@@ -66,53 +76,60 @@ class BenchmarkRunner:
 
     def _push_result(self, result, solver, solver_name, instance):
         if self.results is None:
-            self.results = pd.DataFrame(columns=["Solver",
-                                                 "Instance",
-                                                 "Wallclock Time",
-                                                 "Lower Bound",
-                                                 "Upper Bound",
-                                                 "Gap",
-                                                 "Nodes",
-                                                 "Mode",
-                                                 "Sense",
-                                                 "Predicted LB",
-                                                 "Predicted UB",
-                                                 ])
+            self.results = pd.DataFrame(
+                columns=[
+                    "Solver",
+                    "Instance",
+                    "Wallclock Time",
+                    "Lower Bound",
+                    "Upper Bound",
+                    "Gap",
+                    "Nodes",
+                    "Mode",
+                    "Sense",
+                    "Predicted LB",
+                    "Predicted UB",
+                ]
+            )
         lb = result["Lower bound"]
         ub = result["Upper bound"]
         gap = (ub - lb) / lb
         if "Predicted LB" not in result:
            result["Predicted LB"] = float("nan")
            result["Predicted UB"] = float("nan")
-        self.results = self.results.append({
-            "Solver": solver_name,
-            "Instance": instance,
-            "Wallclock Time": result["Wallclock time"],
-            "Lower Bound": lb,
-            "Upper Bound": ub,
-            "Gap": gap,
-            "Nodes": result["Nodes"],
-            "Mode": solver.mode,
-            "Sense": result["Sense"],
-            "Predicted LB": result["Predicted LB"],
-            "Predicted UB": result["Predicted UB"],
-        }, ignore_index=True)
+        self.results = self.results.append(
+            {
+                "Solver": solver_name,
+                "Instance": instance,
+                "Wallclock Time": result["Wallclock time"],
+                "Lower Bound": lb,
+                "Upper Bound": ub,
+                "Gap": gap,
+                "Nodes": result["Nodes"],
+                "Mode": solver.mode,
+                "Sense": result["Sense"],
+                "Predicted LB": result["Predicted LB"],
+                "Predicted UB": result["Predicted UB"],
+            },
+            ignore_index=True,
+        )
         groups = self.results.groupby("Instance")
         best_lower_bound = groups["Lower Bound"].transform("max")
         best_upper_bound = groups["Upper Bound"].transform("min")
         best_gap = groups["Gap"].transform("min")
         best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
         best_wallclock_time = groups["Wallclock Time"].transform("min")
-        self.results["Relative Lower Bound"] = \
-            self.results["Lower Bound"] / best_lower_bound
-        self.results["Relative Upper Bound"] = \
-            self.results["Upper Bound"] / best_upper_bound
-        self.results["Relative Wallclock Time"] = \
-            self.results["Wallclock Time"] / best_wallclock_time
-        self.results["Relative Gap"] = \
-            self.results["Gap"] / best_gap
-        self.results["Relative Nodes"] = \
-            self.results["Nodes"] / best_nodes
+        self.results["Relative Lower Bound"] = (
+            self.results["Lower Bound"] / best_lower_bound
+        )
+        self.results["Relative Upper Bound"] = (
+            self.results["Upper Bound"] / best_upper_bound
+        )
+        self.results["Relative Wallclock Time"] = (
+            self.results["Wallclock Time"] / best_wallclock_time
+        )
+        self.results["Relative Gap"] = self.results["Gap"] / best_gap
+        self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
 
     def save_chart(self, filename):
         import matplotlib.pyplot as plt
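Besides the raw metrics, _push_result above normalizes each column against the best value any solver achieved on the same instance, so 1.0 always means "best across solvers". A minimal sketch of that arithmetic with made-up numbers (not taken from the commit):

    # Gap, as computed above, is relative to the lower bound.
    lb, ub = 95.0, 100.0
    gap = (ub - lb) / lb                             # ~0.0526

    # Relative wallclock time divides each solver's time by the best one.
    times = {"baseline": 120.0, "ml-exact": 80.0}    # hypothetical solvers
    best = min(times.values())
    relative = {name: t / best for name, t in times.items()}
    print(gap, relative)                             # {'baseline': 1.5, 'ml-exact': 1.0}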
@@ -134,63 +151,70 @@ class BenchmarkRunner:
             obj_column = "Lower Bound"
             predicted_obj_column = "Predicted LB"
 
-        fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
-                                                 ncols=4,
-                                                 figsize=(12,4),
-                                                 gridspec_kw={'width_ratios': [2, 1, 1, 2]})
+        fig, (ax1, ax2, ax3, ax4) = plt.subplots(
+            nrows=1,
+            ncols=4,
+            figsize=(12, 4),
+            gridspec_kw={"width_ratios": [2, 1, 1, 2]},
+        )
 
         # Figure 1: Solver x Wallclock Time
-        sns.stripplot(x="Solver",
-                      y="Wallclock Time",
-                      data=results,
-                      ax=ax1,
-                      jitter=0.25,
-                      size=4.0,
-                      )
-        sns.barplot(x="Solver",
-                    y="Wallclock Time",
-                    data=results,
-                    ax=ax1,
-                    errwidth=0.,
-                    alpha=0.4,
-                    estimator=median,
-                    )
-        ax1.set(ylabel='Wallclock Time (s)')
+        sns.stripplot(
+            x="Solver",
+            y="Wallclock Time",
+            data=results,
+            ax=ax1,
+            jitter=0.25,
+            size=4.0,
+        )
+        sns.barplot(
+            x="Solver",
+            y="Wallclock Time",
+            data=results,
+            ax=ax1,
+            errwidth=0.0,
+            alpha=0.4,
+            estimator=median,
+        )
+        ax1.set(ylabel="Wallclock Time (s)")
 
         # Figure 2: Solver x Gap (%)
         ax2.set_ylim(-0.5, 5.5)
-        sns.stripplot(x="Solver",
-                      y="Gap (%)",
-                      jitter=0.25,
-                      data=results[results["Mode"] != "heuristic"],
-                      ax=ax2,
-                      size=4.0,
-                      )
+        sns.stripplot(
+            x="Solver",
+            y="Gap (%)",
+            jitter=0.25,
+            data=results[results["Mode"] != "heuristic"],
+            ax=ax2,
+            size=4.0,
+        )
 
         # Figure 3: Solver x Primal Value
-        ax3.set_ylim(0.95,1.05)
-        sns.stripplot(x="Solver",
-                      y=primal_column,
-                      jitter=0.25,
-                      data=results[results["Mode"] == "heuristic"],
-                      ax=ax3,
-                      )
+        ax3.set_ylim(0.95, 1.05)
+        sns.stripplot(
+            x="Solver",
+            y=primal_column,
+            jitter=0.25,
+            data=results[results["Mode"] == "heuristic"],
+            ax=ax3,
+        )
 
         # Figure 4: Predicted vs Actual Objective Value
-        sns.scatterplot(x=obj_column,
-                        y=predicted_obj_column,
-                        hue="Solver",
-                        data=results[results["Mode"] != "heuristic"],
-                        ax=ax4,
-                        )
+        sns.scatterplot(
+            x=obj_column,
+            y=predicted_obj_column,
+            hue="Solver",
+            data=results[results["Mode"] != "heuristic"],
+            ax=ax4,
+        )
         xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
-        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc")
+        ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls="-", color="#cccccc")
         ax4.set_xlim(xlim)
         ax4.set_ylim(ylim)
         ax4.get_legend().remove()
 
         fig.tight_layout()
-        plt.savefig(filename, bbox_inches='tight', dpi=150)
+        plt.savefig(filename, bbox_inches="tight", dpi=150)
 
     def _silence_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
@@ -200,5 +224,3 @@ class BenchmarkRunner:
     def _restore_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         miplearn_logger.setLevel(self.prev_log_level)
-
-
@@ -22,9 +22,11 @@ class AdaptiveClassifier(Classifier):
     based on its cross-validation score on a particular training data set.
     """
 
-    def __init__(self,
-                 candidates=None,
-                 evaluator=ClassifierEvaluator()):
+    def __init__(
+        self,
+        candidates=None,
+        evaluator=ClassifierEvaluator(),
+    ):
         """
         Initializes the meta-classifier.
         """
@@ -35,14 +37,13 @@ class AdaptiveClassifier(Classifier):
                 "min samples": 100,
             },
             "logistic": {
-                "classifier": make_pipeline(StandardScaler(),
-                                            LogisticRegression()),
+                "classifier": make_pipeline(StandardScaler(), LogisticRegression()),
                 "min samples": 30,
             },
             "counting": {
                 "classifier": CountingClassifier(),
                 "min samples": 0,
-            }
+            },
         }
         self.candidates = candidates
         self.evaluator = evaluator
@@ -21,8 +21,7 @@ class CountingClassifier(Classifier):
         self.mean = np.mean(y_train)
 
     def predict_proba(self, x_test):
-        return np.array([[1 - self.mean, self.mean]
-                         for _ in range(x_test.shape[0])])
+        return np.array([[1 - self.mean, self.mean] for _ in range(x_test.shape[0])])
 
     def __repr__(self):
         return "CountingClassifier(mean=%s)" % self.mean
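CountingClassifier ignores its features entirely: fit stores the empirical frequency of the positive label, and predict_proba repeats [1 - mean, mean] for every test row. A quick sketch using the same numbers as the test further below:

    import numpy as np

    y_train = [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    mean = np.mean(y_train)        # 0.625: five positives out of eight samples
    print([1 - mean, mean])        # [0.375, 0.625], returned for every test row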
@@ -11,6 +11,7 @@ from sklearn.linear_model import LogisticRegression
 from sklearn.model_selection import cross_val_score
 
 import logging
 
 logger = logging.getLogger(__name__)
 
+
@@ -28,12 +29,14 @@ class CrossValidatedClassifier(Classifier):
     acceptable. Other numbers are a linear interpolation of these two extremes.
     """
 
-    def __init__(self,
-                 classifier=LogisticRegression(),
-                 threshold=0.75,
-                 constant=0.0,
-                 cv=5,
-                 scoring='accuracy'):
+    def __init__(
+        self,
+        classifier=LogisticRegression(),
+        threshold=0.75,
+        constant=0.0,
+        cv=5,
+        scoring="accuracy",
+    ):
         self.classifier = None
         self.classifier_prototype = classifier
         self.constant = constant
@@ -45,24 +48,36 @@ class CrossValidatedClassifier(Classifier):
         # Calculate dummy score and absolute score threshold
         y_train_avg = np.average(y_train)
         dummy_score = max(y_train_avg, 1 - y_train_avg)
-        absolute_threshold = 1. * self.threshold + dummy_score * (1 - self.threshold)
+        absolute_threshold = 1.0 * self.threshold + dummy_score * (1 - self.threshold)
 
         # Calculate cross validation score and decide which classifier to use
         clf = deepcopy(self.classifier_prototype)
-        cv_score = float(np.mean(cross_val_score(clf,
-                                                 x_train,
-                                                 y_train,
-                                                 cv=self.cv,
-                                                 scoring=self.scoring)))
+        cv_score = float(
+            np.mean(
+                cross_val_score(
+                    clf,
+                    x_train,
+                    y_train,
+                    cv=self.cv,
+                    scoring=self.scoring,
+                )
+            )
+        )
         if cv_score >= absolute_threshold:
-            logger.debug("cv_score is above threshold (%.2f >= %.2f); keeping" %
-                         (cv_score, absolute_threshold))
+            logger.debug(
+                "cv_score is above threshold (%.2f >= %.2f); keeping"
+                % (cv_score, absolute_threshold)
+            )
             self.classifier = clf
         else:
-            logger.debug("cv_score is below threshold (%.2f < %.2f); discarding" %
-                         (cv_score, absolute_threshold))
-            self.classifier = DummyClassifier(strategy="constant",
-                                              constant=self.constant)
+            logger.debug(
+                "cv_score is below threshold (%.2f < %.2f); discarding"
+                % (cv_score, absolute_threshold)
+            )
+            self.classifier = DummyClassifier(
+                strategy="constant",
+                constant=self.constant,
+            )
 
         # Train chosen classifier
         self.classifier.fit(x_train, y_train)
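The interpolation above maps the user-facing threshold onto an absolute cross-validation score: 0.0 means "as good as always predicting the majority class" and 1.0 means "perfect". A worked sketch with assumed numbers:

    # With threshold=0.75 and a majority-class (dummy) accuracy of 0.60, the
    # candidate classifier is kept only if it reaches 90% CV accuracy.
    threshold = 0.75
    dummy_score = 0.60             # max(p, 1 - p) for positive-label frequency p
    absolute_threshold = 1.0 * threshold + dummy_score * (1 - threshold)
    print(absolute_threshold)      # 0.9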
@@ -12,7 +12,6 @@ E = 0.1
 def test_counting():
     clf = CountingClassifier()
     clf.fit(np.zeros((8, 25)), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
-    expected_proba = np.array([[0.375, 0.625],
-                               [0.375, 0.625]])
+    expected_proba = np.array([[0.375, 0.625], [0.375, 0.625]])
     actual_proba = clf.predict_proba(np.zeros((2, 25)))
     assert norm(actual_proba - expected_proba) < E
@@ -13,34 +13,36 @@ E = 0.1
 
 def test_cv():
     # Training set: label is true if point is inside a 2D circle
-    x_train = np.array([[x1, x2]
-                        for x1 in range(-10, 11)
-                        for x2 in range(-10, 11)])
+    x_train = np.array([[x1, x2] for x1 in range(-10, 11) for x2 in range(-10, 11)])
     x_train = StandardScaler().fit_transform(x_train)
     n_samples = x_train.shape[0]
 
-    y_train = np.array([1.0 if x1*x1 + x2*x2 <= 100 else 0.0
-                        for x1 in range(-10, 11)
-                        for x2 in range(-10, 11)])
+    y_train = np.array(
+        [
+            1.0 if x1 * x1 + x2 * x2 <= 100 else 0.0
+            for x1 in range(-10, 11)
+            for x2 in range(-10, 11)
+        ]
+    )
 
     # Support vector machines with linear kernels do not perform well on this
     # data set, so predictor should return the given constant.
-    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
-                                                  random_state=42),
-                                   threshold=0.90,
-                                   constant=0.0,
-                                   cv=30)
+    clf = CrossValidatedClassifier(
+        classifier=SVC(probability=True, random_state=42),
+        threshold=0.90,
+        constant=0.0,
+        cv=30,
+    )
     clf.fit(x_train, y_train)
     assert norm(np.zeros(n_samples) - clf.predict(x_train)) < E
 
     # Support vector machines with quadratic kernels perform almost perfectly
     # on this data set, so predictor should return their prediction.
-    clf = CrossValidatedClassifier(classifier=SVC(probability=True,
-                                                  kernel='poly',
-                                                  degree=2,
-                                                  random_state=42),
-                                   threshold=0.90,
-                                   cv=30)
+    clf = CrossValidatedClassifier(
+        classifier=SVC(probability=True, kernel="poly", degree=2, random_state=42),
+        threshold=0.90,
+        cv=30,
+    )
     clf.fit(x_train, y_train)
     print(y_train - clf.predict(x_train))
     assert norm(y_train - clf.predict(x_train)) < E
@@ -17,4 +17,3 @@ def test_evaluator():
     ev = ClassifierEvaluator()
     assert ev.evaluate(clf_a, x_train, y_train) == 1.0
     assert ev.evaluate(clf_b, x_train, y_train) == 0.5
-
@@ -11,12 +11,16 @@ from miplearn.classifiers.threshold import MinPrecisionThreshold
 
 def test_threshold_dynamic():
     clf = Mock(spec=Classifier)
-    clf.predict_proba = Mock(return_value=np.array([
-        [0.10, 0.90],
-        [0.10, 0.90],
-        [0.20, 0.80],
-        [0.30, 0.70],
-    ]))
+    clf.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [0.10, 0.90],
+                [0.10, 0.90],
+                [0.20, 0.80],
+                [0.30, 0.70],
+            ]
+        )
+    )
     x_train = np.array([0, 1, 2, 3])
     y_train = np.array([1, 1, 0, 0])
 
@@ -31,4 +35,3 @@ def test_threshold_dynamic():
 
     threshold = MinPrecisionThreshold(min_precision=0.00)
     assert threshold.find(clf, x_train, y_train) == 0.70
-
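The test pins down the contract of MinPrecisionThreshold: among the candidate cutoffs (the predicted probabilities), it returns the lowest one whose precision still meets the requested floor. A sketch of that idea on the test data above:

    probas = [0.90, 0.90, 0.80, 0.70]   # P(label=1) from the mocked classifier
    labels = [1, 1, 0, 0]
    for t in sorted(set(probas)):       # candidate thresholds: 0.70, 0.80, 0.90
        chosen = [y for p, y in zip(probas, labels) if p >= t]
        print(t, sum(chosen) / len(chosen))   # precision: 0.5, 0.667, 1.0

With min_precision=0.00 every cutoff qualifies, so the lowest one (0.70) is returned, matching the assertion above.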
@@ -30,11 +30,15 @@ class MinPrecisionThreshold(DynamicThreshold):
     def find(self, clf, x_train, y_train):
         proba = clf.predict_proba(x_train)
 
-        assert isinstance(proba, np.ndarray), \
-            "classifier should return numpy array"
-        assert proba.shape == (x_train.shape[0], 2), \
-            "classifier should return (%d,%d)-shaped array, not %s" % (
-                x_train.shape[0], 2, str(proba.shape))
+        assert isinstance(proba, np.ndarray), "classifier should return numpy array"
+        assert proba.shape == (
+            x_train.shape[0],
+            2,
+        ), "classifier should return (%d,%d)-shaped array, not %s" % (
+            x_train.shape[0],
+            2,
+            str(proba.shape),
+        )
 
         fps, tps, thresholds = _binary_clf_curve(y_train, proba[:, 1])
         precision = tps / (tps + fps)
@@ -19,9 +19,11 @@ class UserCutsComponent(Component):
     A component that predicts which user cuts to enforce.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+    ):
         self.violations = set()
         self.count = {}
         self.n_samples = 0
@@ -56,10 +58,11 @@ class UserCutsComponent(Component):
                 violation_to_instance_idx[v] = []
             violation_to_instance_idx[v] += [idx]
 
-        for (v, classifier) in tqdm(self.classifiers.items(),
-                                    desc="Fit (user cuts)",
-                                    disable=not sys.stdout.isatty(),
-                                    ):
+        for (v, classifier) in tqdm(
+            self.classifiers.items(),
+            desc="Fit (user cuts)",
+            disable=not sys.stdout.isatty(),
+        ):
             logger.debug("Training: %s" % (str(v)))
             label = np.zeros(len(training_instances))
             label[violation_to_instance_idx[v]] = 1.0
@@ -79,10 +82,11 @@ class UserCutsComponent(Component):
         all_violations = set()
         for instance in instances:
             all_violations |= set(instance.found_violated_user_cuts)
-        for idx in tqdm(range(len(instances)),
-                        desc="Evaluate (lazy)",
-                        disable=not sys.stdout.isatty(),
-                        ):
+        for idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             instance = instances[idx]
             condition_positive = set(instance.found_violated_user_cuts)
             condition_negative = all_violations - condition_positive
@@ -19,9 +19,11 @@ class DynamicLazyConstraintsComponent(Component):
     A component that predicts which lazy constraints to enforce.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+    ):
         self.violations = set()
         self.count = {}
         self.n_samples = 0
@@ -68,10 +70,11 @@ class DynamicLazyConstraintsComponent(Component):
                 violation_to_instance_idx[v] = []
             violation_to_instance_idx[v] += [idx]
 
-        for (v, classifier) in tqdm(self.classifiers.items(),
-                                    desc="Fit (lazy)",
-                                    disable=not sys.stdout.isatty(),
-                                    ):
+        for (v, classifier) in tqdm(
+            self.classifiers.items(),
+            desc="Fit (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             logger.debug("Training: %s" % (str(v)))
             label = np.zeros(len(training_instances))
             label[violation_to_instance_idx[v]] = 1.0
@@ -91,10 +94,11 @@ class DynamicLazyConstraintsComponent(Component):
         all_violations = set()
         for instance in instances:
             all_violations |= set(instance.found_violated_lazy_constraints)
-        for idx in tqdm(range(len(instances)),
-                        desc="Evaluate (lazy)",
-                        disable=not sys.stdout.isatty(),
-                        ):
+        for idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (lazy)",
+            disable=not sys.stdout.isatty(),
+        ):
             instance = instances[idx]
             condition_positive = set(instance.found_violated_lazy_constraints)
             condition_negative = all_violations - condition_positive
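The evaluate loop above scores the component as a classifier over the set of known violations: constraints actually violated on an instance are the positives, and everything else observed across the data set counts as a negative. A small sketch of that set arithmetic (names are made up):

    all_violations = {"v1", "v2", "v3", "v4"}     # seen anywhere in the data set
    condition_positive = {"v1", "v2"}             # violated on this instance
    condition_negative = all_violations - condition_positive
    predicted = {"v1", "v3"}                      # what the component enforced
    tp = len(predicted & condition_positive)      # 1
    fp = len(predicted & condition_negative)      # 1
    print(tp / (tp + fp))                         # precision: 0.5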
@@ -19,13 +19,14 @@ class LazyConstraint:
 
 
 class StaticLazyConstraintsComponent(Component):
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.05,
-                 use_two_phase_gap=True,
-                 large_gap=1e-2,
-                 violation_tolerance=-0.5,
-                 ):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.05,
+        use_two_phase_gap=True,
+        large_gap=1e-2,
+        violation_tolerance=-0.5,
+    ):
         self.threshold = threshold
         self.classifier_prototype = classifier
         self.classifiers = {}
@@ -74,32 +75,38 @@ class StaticLazyConstraintsComponent(Component):
         logger.debug("Finding violated lazy constraints...")
         constraints_to_add = []
         for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(c.obj,
-                                                                  tol=self.violation_tolerance):
+            if not solver.internal_solver.is_constraint_satisfied(
+                c.obj, tol=self.violation_tolerance
+            ):
                 constraints_to_add.append(c)
         for c in constraints_to_add:
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
             instance.found_violated_lazy_constraints += [c.cid]
         if len(constraints_to_add) > 0:
-            logger.info("%8d lazy constraints added %8d in the pool" % (len(constraints_to_add), len(self.pool)))
+            logger.info(
+                "%8d lazy constraints added %8d in the pool"
+                % (len(constraints_to_add), len(self.pool))
+            )
             return True
         else:
             return False
 
     def fit(self, training_instances):
-        training_instances = [t
-                              for t in training_instances
-                              if hasattr(t, "found_violated_lazy_constraints")]
+        training_instances = [
+            t
+            for t in training_instances
+            if hasattr(t, "found_violated_lazy_constraints")
+        ]
 
         logger.debug("Extracting x and y...")
         x = self.x(training_instances)
         y = self.y(training_instances)
 
         logger.debug("Fitting...")
-        for category in tqdm(x.keys(),
-                             desc="Fit (lazy)",
-                             disable=not sys.stdout.isatty()):
+        for category in tqdm(
+            x.keys(), desc="Fit (lazy)", disable=not sys.stdout.isatty()
+        ):
             if category not in self.classifiers:
                 self.classifiers[category] = deepcopy(self.classifier_prototype)
             self.classifiers[category].fit(x[category], y[category])
@@ -121,8 +128,10 @@ class StaticLazyConstraintsComponent(Component):
                 x[category] = []
                 constraints[category] = []
             x[category] += [instance.get_constraint_features(cid)]
-            c = LazyConstraint(cid=cid,
-                               obj=solver.internal_solver.extract_constraint(cid))
+            c = LazyConstraint(
+                cid=cid,
+                obj=solver.internal_solver.extract_constraint(cid),
+            )
             constraints[category] += [c]
             self.pool.append(c)
         logger.info("%8d lazy constraints extracted" % len(self.pool))
@@ -141,7 +150,13 @@ class StaticLazyConstraintsComponent(Component):
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
             instance.found_violated_lazy_constraints += [c.cid]
-        logger.info("%8d lazy constraints added %8d in the pool" % (n_added, len(self.pool)))
+        logger.info(
+            "%8d lazy constraints added %8d in the pool"
+            % (
+                n_added,
+                len(self.pool),
+            )
+        )
 
     def _collect_constraints(self, train_instances):
         constraints = {}
@@ -1,13 +1,20 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from sklearn.metrics import mean_squared_error, explained_variance_score, max_error, mean_absolute_error, r2_score
+from sklearn.metrics import (
+    mean_squared_error,
+    explained_variance_score,
+    max_error,
+    mean_absolute_error,
+    r2_score,
+)
 
 from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
 from sklearn.linear_model import LinearRegression
 from copy import deepcopy
 import numpy as np
 import logging
 
 logger = logging.getLogger(__name__)
 
+
@@ -15,8 +22,8 @@ class ObjectiveValueComponent:
     """
     A Component which predicts the optimal objective value of the problem.
     """
-    def __init__(self,
-                 regressor=LinearRegression()):
+
+    def __init__(self, regressor=LinearRegression()):
         self.ub_regressor = None
         self.lb_regressor = None
         self.regressor_prototype = regressor
@@ -19,10 +19,12 @@ class PrimalSolutionComponent(Component):
     A component that predicts primal solutions.
     """
 
-    def __init__(self,
-                 classifier=AdaptiveClassifier(),
-                 mode="exact",
-                 threshold=MinPrecisionThreshold(0.98)):
+    def __init__(
+        self,
+        classifier=AdaptiveClassifier(),
+        mode="exact",
+        threshold=MinPrecisionThreshold(0.98),
+    ):
         self.mode = mode
         self.classifiers = {}
         self.thresholds = {}
@@ -51,9 +53,10 @@ class PrimalSolutionComponent(Component):
         features = VariableFeaturesExtractor().extract(training_instances)
         solutions = SolutionExtractor().extract(training_instances)
 
-        for category in tqdm(features.keys(),
-                             desc="Fit (primal)",
-                             ):
+        for category in tqdm(
+            features.keys(),
+            desc="Fit (primal)",
+        ):
             x_train = features[category]
             for label in [0, 1]:
                 y_train = solutions[category][:, label].astype(int)
@@ -74,9 +77,15 @@ class PrimalSolutionComponent(Component):
 
                 # Find threshold (dynamic or static)
                 if isinstance(self.threshold_prototype, DynamicThreshold):
-                    self.thresholds[category, label] = self.threshold_prototype.find(clf, x_train, y_train)
+                    self.thresholds[category, label] = self.threshold_prototype.find(
+                        clf,
+                        x_train,
+                        y_train,
+                    )
                 else:
-                    self.thresholds[category, label] = deepcopy(self.threshold_prototype)
+                    self.thresholds[category, label] = deepcopy(
+                        self.threshold_prototype
+                    )
 
                 self.classifiers[category, label] = clf
 
@@ -98,18 +107,21 @@ class PrimalSolutionComponent(Component):
                 ws = np.array([[1 - clf, clf] for _ in range(n)])
             else:
                 ws = clf.predict_proba(x_test[category])
-            assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (n, ws.shape)
+            assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (
+                n,
+                ws.shape,
+            )
             for (i, (var, index)) in enumerate(var_split[category]):
                 if ws[i, 1] >= self.thresholds[category, label]:
                     solution[var][index] = label
         return solution
 
     def evaluate(self, instances):
-        ev = {"Fix zero": {},
-              "Fix one": {}}
-        for instance_idx in tqdm(range(len(instances)),
-                                 desc="Evaluate (primal)",
-                                 ):
+        ev = {"Fix zero": {}, "Fix one": {}}
+        for instance_idx in tqdm(
+            range(len(instances)),
+            desc="Evaluate (primal)",
+        ):
             instance = instances[instance_idx]
             solution_actual = instance.solution
             solution_pred = self.predict(instance)
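In predict above, a variable is only fixed when the classifier's predicted probability for the chosen label clears the threshold learned for that (category, label) pair. A sketch of the decision rule with assumed numbers:

    ws = [[0.10, 0.90], [0.60, 0.40]]   # predict_proba output, one row per variable
    threshold = 0.75                    # learned, e.g., by MinPrecisionThreshold
    for i, row in enumerate(ws):
        if row[1] >= threshold:         # same comparison as ws[i, 1] above
            print("fix variable", i)    # only variable 0 clears the bar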
@@ -143,6 +155,10 @@ class PrimalSolutionComponent(Component):
             tn_one = len(pred_one_negative & vars_zero)
             fn_one = len(pred_one_negative & vars_one)
 
-            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(tp_zero, tn_zero, fp_zero, fn_zero)
-            ev["Fix one"][instance_idx] = classifier_evaluation_dict(tp_one, tn_one, fp_one, fn_one)
+            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(
+                tp_zero, tn_zero, fp_zero, fn_zero
+            )
+            ev["Fix one"][instance_idx] = classifier_evaluation_dict(
+                tp_one, tn_one, fp_one, fn_one
+            )
         return ev
@@ -51,14 +51,15 @@ class RelaxationComponent(Component):
     If `check_dropped` is true, set the maximum number of iterations in the lazy constraint loop.
     """
 
-    def __init__(self,
-                 classifier=CountingClassifier(),
-                 threshold=0.95,
-                 slack_tolerance=1e-5,
-                 check_dropped=False,
-                 violation_tolerance=1e-5,
-                 max_iterations=3,
-                 ):
+    def __init__(
+        self,
+        classifier=CountingClassifier(),
+        threshold=0.95,
+        slack_tolerance=1e-5,
+        check_dropped=False,
+        violation_tolerance=1e-5,
+        max_iterations=3,
+    ):
         self.classifiers = {}
         self.classifier_prototype = classifier
         self.threshold = threshold
@@ -77,16 +78,20 @@ class RelaxationComponent(Component):
 
         logger.info("Predicting redundant LP constraints...")
         cids = solver.internal_solver.get_constraint_ids()
-        x, constraints = self.x([instance],
-                                constraint_ids=cids,
-                                return_constraints=True)
+        x, constraints = self.x(
+            [instance],
+            constraint_ids=cids,
+            return_constraints=True,
+        )
         y = self.predict(x)
         for category in y.keys():
             for i in range(len(y[category])):
                 if y[category][i][0] == 1:
                     cid = constraints[category][i]
-                    c = LazyConstraint(cid=cid,
-                                       obj=solver.internal_solver.extract_constraint(cid))
+                    c = LazyConstraint(
+                        cid=cid,
+                        obj=solver.internal_solver.extract_constraint(cid),
+                    )
                     self.pool += [c]
         logger.info("Extracted %d predicted constraints" % len(self.pool))
 
@@ -98,21 +103,19 @@ class RelaxationComponent(Component):
         x = self.x(training_instances)
         y = self.y(training_instances)
         logger.debug("Fitting...")
-        for category in tqdm(x.keys(),
-                             desc="Fit (relaxation)"):
+        for category in tqdm(x.keys(), desc="Fit (relaxation)"):
             if category not in self.classifiers:
                 self.classifiers[category] = deepcopy(self.classifier_prototype)
             self.classifiers[category].fit(x[category], y[category])
 
-    def x(self,
-          instances,
-          constraint_ids=None,
-          return_constraints=False):
+    def x(self, instances, constraint_ids=None, return_constraints=False):
         x = {}
         constraints = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (relaxation:x)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (relaxation:x)",
+            disable=len(instances) < 5,
+        ):
             if constraint_ids is not None:
                 cids = constraint_ids
             else:
@@ -133,9 +136,11 @@ class RelaxationComponent(Component):
 
     def y(self, instances):
         y = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (relaxation:y)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (relaxation:y)",
+            disable=len(instances) < 5,
+        ):
             for (cid, slack) in instance.slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
@@ -154,7 +159,7 @@ class RelaxationComponent(Component):
             if category not in self.classifiers:
                 continue
             y[category] = []
-            #x_cat = np.array(x_cat)
+            # x_cat = np.array(x_cat)
             proba = self.classifiers[category].predict_proba(x_cat)
             for i in range(len(proba)):
                 if proba[i][1] >= self.threshold:
@@ -191,13 +196,19 @@ class RelaxationComponent(Component):
         logger.debug("Checking that dropped constraints are satisfied...")
         constraints_to_add = []
         for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(c.obj, self.violation_tolerance):
+            if not solver.internal_solver.is_constraint_satisfied(
+                c.obj,
+                self.violation_tolerance,
+            ):
                 constraints_to_add.append(c)
         for c in constraints_to_add:
             self.pool.remove(c)
             solver.internal_solver.add_constraint(c.obj)
         if len(constraints_to_add) > 0:
-            logger.info("%8d constraints %8d in the pool" % (len(constraints_to_add), len(self.pool)))
+            logger.info(
+                "%8d constraints %8d in the pool"
+                % (len(constraints_to_add), len(self.pool))
+            )
             return True
         else:
             return False
@@ -28,9 +28,9 @@ def test_lazy_fit():
     assert "c" in component.classifiers
 
     # Should provide correct x_train to each classifier
-    expected_x_train_a = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
-    expected_x_train_b = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
-    expected_x_train_c = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
+    expected_x_train_a = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
+    expected_x_train_b = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
+    expected_x_train_c = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
     actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
     actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
     actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
@@ -56,16 +56,15 @@ def test_lazy_before():
     solver = LearningSolver()
     solver.internal_solver = Mock(spec=InternalSolver)
     component = DynamicLazyConstraintsComponent(threshold=0.10)
-    component.classifiers = {"a": Mock(spec=Classifier),
-                             "b": Mock(spec=Classifier)}
+    component.classifiers = {"a": Mock(spec=Classifier), "b": Mock(spec=Classifier)}
     component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
     component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])
 
     component.before_solve(solver, instances[0], models[0])
 
     # Should ask classifier likelihood of each constraint being violated
-    expected_x_test_a = np.array([[67., 21.75, 1287.92]])
-    expected_x_test_b = np.array([[67., 21.75, 1287.92]])
+    expected_x_test_a = np.array([[67.0, 21.75, 1287.92]])
+    expected_x_test_b = np.array([[67.0, 21.75, 1287.92]])
     actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
     actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
     assert norm(expected_x_test_a - actual_x_test_a) < E
@@ -82,9 +81,11 @@ def test_lazy_before():
 def test_lazy_evaluate():
     instances, models = get_test_pyomo_instances()
     component = DynamicLazyConstraintsComponent()
-    component.classifiers = {"a": Mock(spec=Classifier),
-                             "b": Mock(spec=Classifier),
-                             "c": Mock(spec=Classifier)}
+    component.classifiers = {
+        "a": Mock(spec=Classifier),
+        "b": Mock(spec=Classifier),
+        "c": Mock(spec=Classifier),
+    }
     component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
     component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
     component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])
@@ -96,7 +97,7 @@ def test_lazy_evaluate():
         "Accuracy": 0.75,
         "F1 score": 0.8,
         "Precision": 1.0,
-        "Recall": 2/3.,
+        "Recall": 2 / 3.0,
         "Predicted positive": 2,
         "Predicted negative": 2,
         "Condition positive": 3,
@@ -135,6 +136,5 @@ def test_lazy_evaluate():
         "False positive (%)": 25.0,
         "True negative (%)": 25.0,
         "True positive (%)": 25.0,
-    }
+    },
     }
-
@@ -4,10 +4,12 @@
 
 from unittest.mock import Mock, call
 
-from miplearn import (StaticLazyConstraintsComponent,
-                      LearningSolver,
-                      Instance,
-                      InternalSolver)
+from miplearn import (
+    StaticLazyConstraintsComponent,
+    LearningSolver,
+    Instance,
+    InternalSolver,
+)
 from miplearn.classifiers import Classifier
 
 
@@ -23,39 +25,47 @@ def test_usage_with_solver():
 
     instance = Mock(spec=Instance)
     instance.has_static_lazy_constraints = Mock(return_value=True)
-    instance.is_constraint_lazy = Mock(side_effect=lambda cid: {
-        "c1": False,
-        "c2": True,
-        "c3": True,
-        "c4": True,
-    }[cid])
-    instance.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c2": [1.0, 0.0],
-        "c3": [0.5, 0.5],
-        "c4": [1.0],
-    }[cid])
-    instance.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-    }[cid])
+    instance.is_constraint_lazy = Mock(
+        side_effect=lambda cid: {
+            "c1": False,
+            "c2": True,
+            "c3": True,
+            "c4": True,
+        }[cid]
+    )
+    instance.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": [1.0, 0.0],
+            "c3": [0.5, 0.5],
+            "c4": [1.0],
+        }[cid]
+    )
+    instance.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
 
-    component = StaticLazyConstraintsComponent(threshold=0.90,
-                                               use_two_phase_gap=False,
-                                               violation_tolerance=1.0)
+    component = StaticLazyConstraintsComponent(
+        threshold=0.90, use_two_phase_gap=False, violation_tolerance=1.0
+    )
     component.classifiers = {
         "type-a": Mock(spec=Classifier),
         "type-b": Mock(spec=Classifier),
     }
-    component.classifiers["type-a"].predict_proba = \
-        Mock(return_value=[
-            [0.20, 0.80],
-            [0.05, 0.95],
-        ])
-    component.classifiers["type-b"].predict_proba = \
-        Mock(return_value=[
-            [0.02, 0.98],
-        ])
+    component.classifiers["type-a"].predict_proba = Mock(
+        return_value=[
+            [0.20, 0.80],
+            [0.05, 0.95],
+        ]
+    )
+    component.classifiers["type-b"].predict_proba = Mock(
+        return_value=[
+            [0.02, 0.98],
+        ]
+    )
 
     # LearningSolver calls before_solve
     component.before_solve(solver, instance, None)
@@ -67,37 +77,59 @@ def test_usage_with_solver():
     internal.get_constraint_ids.assert_called_once()
 
     # Should ask if each constraint in the model is lazy
-    instance.is_constraint_lazy.assert_has_calls([
-        call("c1"), call("c2"), call("c3"), call("c4"),
-    ])
+    instance.is_constraint_lazy.assert_has_calls(
+        [
+            call("c1"),
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # For the lazy ones, should ask for features
-    instance.get_constraint_features.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    instance.get_constraint_features.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should also ask for categories
     assert instance.get_constraint_category.call_count == 3
-    instance.get_constraint_category.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    instance.get_constraint_category.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should ask internal solver to remove constraints identified as lazy
     assert internal.extract_constraint.call_count == 3
-    internal.extract_constraint.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    internal.extract_constraint.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should ask ML to predict whether each lazy constraint should be enforced
-    component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
+    component.classifiers["type-a"].predict_proba.assert_called_once_with(
+        [[1.0, 0.0], [0.5, 0.5]]
+    )
     component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
 
     # For the ones that should be enforced, should ask solver to re-add them
     # to the formulation. The remaining ones should remain in the pool.
     assert internal.add_constraint.call_count == 2
-    internal.add_constraint.assert_has_calls([
-        call("<c3>"), call("<c4>"),
-    ])
+    internal.add_constraint.assert_has_calls(
+        [
+            call("<c3>"),
+            call("<c4>"),
+        ]
+    )
     internal.add_constraint.reset_mock()
 
     # LearningSolver calls after_iteration (first time)
@@ -126,37 +158,45 @@ def test_usage_with_solver():
 def test_fit():
     instance_1 = Mock(spec=Instance)
     instance_1.found_violated_lazy_constraints = ["c1", "c2", "c4", "c5"]
-    instance_1.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": "type-a",
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-        "c5": "type-b",
-    }[cid])
-    instance_1.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c1": [1, 1],
-        "c2": [1, 2],
-        "c3": [1, 3],
-        "c4": [1, 4, 0],
-        "c5": [1, 5, 0],
-    }[cid])
+    instance_1.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": "type-a",
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+            "c5": "type-b",
+        }[cid]
+    )
+    instance_1.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c1": [1, 1],
+            "c2": [1, 2],
+            "c3": [1, 3],
+            "c4": [1, 4, 0],
+            "c5": [1, 5, 0],
+        }[cid]
+    )
 
     instance_2 = Mock(spec=Instance)
     instance_2.found_violated_lazy_constraints = ["c2", "c3", "c4"]
-    instance_2.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": "type-a",
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-        "c5": "type-b",
-    }[cid])
-    instance_2.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c1": [2, 1],
-        "c2": [2, 2],
-        "c3": [2, 3],
-        "c4": [2, 4, 0],
-        "c5": [2, 5, 0],
-    }[cid])
+    instance_2.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": "type-a",
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+            "c5": "type-b",
+        }[cid]
+    )
+    instance_2.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c1": [2, 1],
+            "c2": [2, 2],
+            "c3": [2, 3],
+            "c4": [2, 4, 0],
+            "c5": [2, 5, 0],
+        }[cid]
+    )
 
     instances = [instance_1, instance_2]
     component = StaticLazyConstraintsComponent()
@@ -171,18 +211,22 @@ def test_fit():
|
|||||||
}
|
}
|
||||||
expected_x = {
|
expected_x = {
|
||||||
"type-a": [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]],
|
"type-a": [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]],
|
||||||
"type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]]
|
"type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]],
|
||||||
}
|
}
|
||||||
expected_y = {
|
expected_y = {
|
||||||
"type-a": [[0, 1], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
|
"type-a": [[0, 1], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
|
||||||
"type-b": [[0, 1], [0, 1], [0, 1], [1, 0]]
|
"type-b": [[0, 1], [0, 1], [0, 1], [1, 0]],
|
||||||
}
|
}
|
||||||
assert component._collect_constraints(instances) == expected_constraints
|
assert component._collect_constraints(instances) == expected_constraints
|
||||||
assert component.x(instances) == expected_x
|
assert component.x(instances) == expected_x
|
||||||
assert component.y(instances) == expected_y
|
assert component.y(instances) == expected_y
|
||||||
|
|
||||||
component.fit(instances)
|
component.fit(instances)
|
||||||
component.classifiers["type-a"].fit.assert_called_once_with(expected_x["type-a"],
|
component.classifiers["type-a"].fit.assert_called_once_with(
|
||||||
expected_y["type-a"])
|
expected_x["type-a"],
|
||||||
component.classifiers["type-b"].fit.assert_called_once_with(expected_x["type-b"],
|
expected_y["type-a"],
|
||||||
expected_y["type-b"])
|
)
|
||||||
|
component.classifiers["type-b"].fit.assert_called_once_with(
|
||||||
|
expected_x["type-b"],
|
||||||
|
expected_y["type-b"],
|
||||||
|
)
|
||||||
|
|||||||
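Aside from the line-wrapping, the tests above hinge on two unittest.mock features: side_effect with a dict lookup to return a canned value per argument, and assert_has_calls to check call order. A minimal self-contained sketch, with illustrative names not taken from the test suite:

    from unittest.mock import Mock, call

    # side_effect with a dict lookup maps each argument to a fixed return value
    get_category = Mock(side_effect=lambda cid: {"c1": "type-a", "c2": "type-b"}[cid])
    assert get_category("c1") == "type-a"
    assert get_category("c2") == "type-b"

    # assert_has_calls verifies these calls happened consecutively and in order;
    # unrelated calls before or after the sequence are still allowed
    get_category.assert_has_calls([call("c1"), call("c2")])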
@@ -16,8 +16,10 @@ def test_usage():
     comp.fit(instances)
     assert instances[0].lower_bound == 1183.0
     assert instances[0].upper_bound == 1183.0
-    assert np.round(comp.predict(instances), 2).tolist() == [[1183.0, 1183.0],
-                                                             [1070.0, 1070.0]]
+    assert np.round(comp.predict(instances), 2).tolist() == [
+        [1183.0, 1183.0],
+        [1070.0, 1070.0],
+    ]
 
 
 def test_obj_evaluate():
@@ -28,20 +30,20 @@ def test_obj_evaluate():
     comp.fit(instances)
     ev = comp.evaluate(instances)
     assert ev == {
-        'Lower bound': {
-            'Explained variance': 0.0,
-            'Max error': 183.0,
-            'Mean absolute error': 126.5,
-            'Mean squared error': 19194.5,
-            'Median absolute error': 126.5,
-            'R2': -5.012843605607331,
+        "Lower bound": {
+            "Explained variance": 0.0,
+            "Max error": 183.0,
+            "Mean absolute error": 126.5,
+            "Mean squared error": 19194.5,
+            "Median absolute error": 126.5,
+            "R2": -5.012843605607331,
+        },
+        "Upper bound": {
+            "Explained variance": 0.0,
+            "Max error": 183.0,
+            "Mean absolute error": 126.5,
+            "Mean squared error": 19194.5,
+            "Median absolute error": 126.5,
+            "R2": -5.012843605607331,
         },
-        'Upper bound': {
-            'Explained variance': 0.0,
-            'Max error': 183.0,
-            'Mean absolute error': 126.5,
-            'Mean squared error': 19194.5,
-            'Median absolute error': 126.5,
-            'R2': -5.012843605607331,
-        }
     }
@@ -25,71 +25,82 @@ def test_predict():
 def test_evaluate():
     instances, models = get_test_pyomo_instances()
     clf_zero = Mock(spec=Classifier)
-    clf_zero.predict_proba = Mock(return_value=np.array([
-        [0., 1.],  # x[0]
-        [0., 1.],  # x[1]
-        [1., 0.],  # x[2]
-        [1., 0.],  # x[3]
-    ]))
+    clf_zero.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [0.0, 1.0],  # x[0]
+                [0.0, 1.0],  # x[1]
+                [1.0, 0.0],  # x[2]
+                [1.0, 0.0],  # x[3]
+            ]
+        )
+    )
     clf_one = Mock(spec=Classifier)
-    clf_one.predict_proba = Mock(return_value=np.array([
-        [1., 0.],  # x[0] instances[0]
-        [1., 0.],  # x[1] instances[0]
-        [0., 1.],  # x[2] instances[0]
-        [1., 0.],  # x[3] instances[0]
-    ]))
-    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
-                                   threshold=0.50)
+    clf_one.predict_proba = Mock(
+        return_value=np.array(
+            [
+                [1.0, 0.0],  # x[0] instances[0]
+                [1.0, 0.0],  # x[1] instances[0]
+                [0.0, 1.0],  # x[2] instances[0]
+                [1.0, 0.0],  # x[3] instances[0]
+            ]
+        )
+    )
+    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one], threshold=0.50)
     comp.fit(instances[:1])
-    assert comp.predict(instances[0]) == {"x": {0: 0,
-                                                1: 0,
-                                                2: 1,
-                                                3: None}}
-    assert instances[0].solution == {"x": {0: 1,
-                                           1: 0,
-                                           2: 1,
-                                           3: 1}}
+    assert comp.predict(instances[0]) == {"x": {0: 0, 1: 0, 2: 1, 3: None}}
+    assert instances[0].solution == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
     ev = comp.evaluate(instances[:1])
-    assert ev == {'Fix one': {0: {'Accuracy': 0.5,
-                                  'Condition negative': 1,
-                                  'Condition negative (%)': 25.0,
-                                  'Condition positive': 3,
-                                  'Condition positive (%)': 75.0,
-                                  'F1 score': 0.5,
-                                  'False negative': 2,
-                                  'False negative (%)': 50.0,
-                                  'False positive': 0,
-                                  'False positive (%)': 0.0,
-                                  'Precision': 1.0,
-                                  'Predicted negative': 3,
-                                  'Predicted negative (%)': 75.0,
-                                  'Predicted positive': 1,
-                                  'Predicted positive (%)': 25.0,
-                                  'Recall': 0.3333333333333333,
-                                  'True negative': 1,
-                                  'True negative (%)': 25.0,
-                                  'True positive': 1,
-                                  'True positive (%)': 25.0}},
-                  'Fix zero': {0: {'Accuracy': 0.75,
-                                   'Condition negative': 3,
-                                   'Condition negative (%)': 75.0,
-                                   'Condition positive': 1,
-                                   'Condition positive (%)': 25.0,
-                                   'F1 score': 0.6666666666666666,
-                                   'False negative': 0,
-                                   'False negative (%)': 0.0,
-                                   'False positive': 1,
-                                   'False positive (%)': 25.0,
-                                   'Precision': 0.5,
-                                   'Predicted negative': 2,
-                                   'Predicted negative (%)': 50.0,
-                                   'Predicted positive': 2,
-                                   'Predicted positive (%)': 50.0,
-                                   'Recall': 1.0,
-                                   'True negative': 2,
-                                   'True negative (%)': 50.0,
-                                   'True positive': 1,
-                                   'True positive (%)': 25.0}}}
+    assert ev == {
+        "Fix one": {
+            0: {
+                "Accuracy": 0.5,
+                "Condition negative": 1,
+                "Condition negative (%)": 25.0,
+                "Condition positive": 3,
+                "Condition positive (%)": 75.0,
+                "F1 score": 0.5,
+                "False negative": 2,
+                "False negative (%)": 50.0,
+                "False positive": 0,
+                "False positive (%)": 0.0,
+                "Precision": 1.0,
+                "Predicted negative": 3,
+                "Predicted negative (%)": 75.0,
+                "Predicted positive": 1,
+                "Predicted positive (%)": 25.0,
+                "Recall": 0.3333333333333333,
+                "True negative": 1,
+                "True negative (%)": 25.0,
+                "True positive": 1,
+                "True positive (%)": 25.0,
+            }
+        },
+        "Fix zero": {
+            0: {
+                "Accuracy": 0.75,
+                "Condition negative": 3,
+                "Condition negative (%)": 75.0,
+                "Condition positive": 1,
+                "Condition positive (%)": 25.0,
+                "F1 score": 0.6666666666666666,
+                "False negative": 0,
+                "False negative (%)": 0.0,
+                "False positive": 1,
+                "False positive (%)": 25.0,
+                "Precision": 0.5,
+                "Predicted negative": 2,
+                "Predicted negative (%)": 50.0,
+                "Predicted positive": 2,
+                "Predicted positive (%)": 50.0,
+                "Recall": 1.0,
+                "True negative": 2,
+                "True negative (%)": 50.0,
+                "True positive": 1,
+                "True positive (%)": 25.0,
+            }
+        },
+    }
 
 
 def test_primal_parallel_fit():
@@ -4,10 +4,7 @@
 
 from unittest.mock import Mock, call
 
-from miplearn import (RelaxationComponent,
-                      LearningSolver,
-                      Instance,
-                      InternalSolver)
+from miplearn import RelaxationComponent, LearningSolver, Instance, InternalSolver
 from miplearn.classifiers import Classifier
 
 
@@ -16,41 +13,49 @@ def _setup():
 
     internal = solver.internal_solver = Mock(spec=InternalSolver)
     internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
-    internal.get_constraint_slacks = Mock(side_effect=lambda: {
-        "c1": 0.5,
-        "c2": 0.0,
-        "c3": 0.0,
-        "c4": 1.4,
-    })
+    internal.get_constraint_slacks = Mock(
+        side_effect=lambda: {
+            "c1": 0.5,
+            "c2": 0.0,
+            "c3": 0.0,
+            "c4": 1.4,
+        }
+    )
     internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
     internal.is_constraint_satisfied = Mock(return_value=False)
 
     instance = Mock(spec=Instance)
-    instance.get_constraint_features = Mock(side_effect=lambda cid: {
-        "c2": [1.0, 0.0],
-        "c3": [0.5, 0.5],
-        "c4": [1.0],
-    }[cid])
-    instance.get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": None,
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-    }[cid])
+    instance.get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": [1.0, 0.0],
+            "c3": [0.5, 0.5],
+            "c4": [1.0],
+        }[cid]
+    )
+    instance.get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": None,
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
 
     classifiers = {
         "type-a": Mock(spec=Classifier),
         "type-b": Mock(spec=Classifier),
     }
-    classifiers["type-a"].predict_proba = \
-        Mock(return_value=[
-            [0.20, 0.80],
-            [0.05, 0.95],
-        ])
-    classifiers["type-b"].predict_proba = \
-        Mock(return_value=[
-            [0.02, 0.98],
-        ])
+    classifiers["type-a"].predict_proba = Mock(
+        return_value=[
+            [0.20, 0.80],
+            [0.05, 0.95],
+        ]
+    )
+    classifiers["type-b"].predict_proba = Mock(
+        return_value=[
+            [0.02, 0.98],
+        ]
+    )
 
     return solver, internal, instance, classifiers
 
@@ -72,25 +77,39 @@ def test_usage():
 
     # Should query category and features for each constraint in the model
     assert instance.get_constraint_category.call_count == 4
-    instance.get_constraint_category.assert_has_calls([
-        call("c1"), call("c2"), call("c3"), call("c4"),
-    ])
+    instance.get_constraint_category.assert_has_calls(
+        [
+            call("c1"),
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # For constraint with non-null categories, should ask for features
     assert instance.get_constraint_features.call_count == 3
-    instance.get_constraint_features.assert_has_calls([
-        call("c2"), call("c3"), call("c4"),
-    ])
+    instance.get_constraint_features.assert_has_calls(
+        [
+            call("c2"),
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # Should ask ML to predict whether constraint should be removed
-    component.classifiers["type-a"].predict_proba.assert_called_once_with([[1.0, 0.0], [0.5, 0.5]])
+    component.classifiers["type-a"].predict_proba.assert_called_once_with(
+        [[1.0, 0.0], [0.5, 0.5]]
+    )
     component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])
 
     # Should ask internal solver to remove constraints predicted as redundant
     assert internal.extract_constraint.call_count == 2
-    internal.extract_constraint.assert_has_calls([
-        call("c3"), call("c4"),
-    ])
+    internal.extract_constraint.assert_has_calls(
+        [
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # LearningSolver calls after_solve
     component.after_solve(solver, instance, None, None)
@@ -111,8 +130,7 @@ def test_usage():
 def test_usage_with_check_dropped():
     solver, internal, instance, classifiers = _setup()
 
-    component = RelaxationComponent(check_dropped=True,
-                                    violation_tolerance=1e-3)
+    component = RelaxationComponent(check_dropped=True, violation_tolerance=1e-3)
     component.classifiers = classifiers
 
     # LearningSolver call before_solve
@@ -120,9 +138,12 @@ def test_usage_with_check_dropped():
 
     # Assert constraints are extracted
     assert internal.extract_constraint.call_count == 2
-    internal.extract_constraint.assert_has_calls([
-        call("c3"), call("c4"),
-    ])
+    internal.extract_constraint.assert_has_calls(
+        [
+            call("c3"),
+            call("c4"),
+        ]
+    )
 
     # LearningSolver calls iteration_cb (first time)
     should_repeat = component.iteration_cb(solver, instance, None)
@@ -131,15 +152,15 @@ def test_usage_with_check_dropped():
     assert should_repeat
 
     # Should ask solver if removed constraints are satisfied (mock always returns false)
-    internal.is_constraint_satisfied.assert_has_calls([
-        call("<c3>", 1e-3),
-        call("<c4>", 1e-3),
-    ])
+    internal.is_constraint_satisfied.assert_has_calls(
+        [
+            call("<c3>", 1e-3),
+            call("<c4>", 1e-3),
+        ]
+    )
 
     # Should add constraints back to LP relaxation
-    internal.add_constraint.assert_has_calls([
-        call("<c3>"), call("<c4>")
-    ])
+    internal.add_constraint.assert_has_calls([call("<c3>"), call("<c4>")])
 
     # LearningSolver calls iteration_cb (second time)
     should_repeat = component.iteration_cb(solver, instance, None)
@@ -148,21 +169,22 @@ def test_usage_with_check_dropped():
 
 def test_x_y_fit_predict_evaluate():
     instances = [Mock(spec=Instance), Mock(spec=Instance)]
-    component = RelaxationComponent(slack_tolerance=0.05,
-                                    threshold=0.80)
+    component = RelaxationComponent(slack_tolerance=0.05, threshold=0.80)
     component.classifiers = {
         "type-a": Mock(spec=Classifier),
         "type-b": Mock(spec=Classifier),
     }
-    component.classifiers["type-a"].predict_proba = \
-        Mock(return_value=[
-            [0.20, 0.80],
-        ])
-    component.classifiers["type-b"].predict_proba = \
-        Mock(return_value=[
-            [0.50, 0.50],
-            [0.05, 0.95],
-        ])
+    component.classifiers["type-a"].predict_proba = Mock(
+        return_value=[
+            [0.20, 0.80],
+        ]
+    )
+    component.classifiers["type-b"].predict_proba = Mock(
+        return_value=[
+            [0.50, 0.50],
+            [0.05, 0.95],
+        ]
+    )
 
     # First mock instance
     instances[0].slacks = {
@@ -171,17 +193,21 @@ def test_x_y_fit_predict_evaluate():
         "c3": 0.00,
        "c4": 30.0,
     }
-    instances[0].get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": None,
-        "c2": "type-a",
-        "c3": "type-a",
-        "c4": "type-b",
-    }[cid])
-    instances[0].get_constraint_features = Mock(side_effect=lambda cid: {
-        "c2": [1.0, 0.0],
-        "c3": [0.5, 0.5],
-        "c4": [1.0],
-    }[cid])
+    instances[0].get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": None,
+            "c2": "type-a",
+            "c3": "type-a",
+            "c4": "type-b",
+        }[cid]
+    )
+    instances[0].get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c2": [1.0, 0.0],
+            "c3": [0.5, 0.5],
+            "c4": [1.0],
+        }[cid]
+    )
 
     # Second mock instance
     instances[1].slacks = {
@@ -190,26 +216,27 @@ def test_x_y_fit_predict_evaluate():
         "c4": 0.00,
         "c5": 0.00,
     }
-    instances[1].get_constraint_category = Mock(side_effect=lambda cid: {
-        "c1": None,
-        "c3": "type-a",
-        "c4": "type-b",
-        "c5": "type-b",
-    }[cid])
-    instances[1].get_constraint_features = Mock(side_effect=lambda cid: {
-        "c3": [0.3, 0.4],
-        "c4": [0.7],
-        "c5": [0.8],
-    }[cid])
+    instances[1].get_constraint_category = Mock(
+        side_effect=lambda cid: {
+            "c1": None,
+            "c3": "type-a",
+            "c4": "type-b",
+            "c5": "type-b",
+        }[cid]
+    )
+    instances[1].get_constraint_features = Mock(
+        side_effect=lambda cid: {
+            "c3": [0.3, 0.4],
+            "c4": [0.7],
+            "c5": [0.8],
+        }[cid]
+    )
 
     expected_x = {
         "type-a": [[1.0, 0.0], [0.5, 0.5], [0.3, 0.4]],
         "type-b": [[1.0], [0.7], [0.8]],
     }
-    expected_y = {
-        "type-a": [[0], [0], [1]],
-        "type-b": [[1], [0], [0]]
-    }
+    expected_y = {"type-a": [[0], [0], [1]], "type-b": [[1], [0], [0]]}
 
     # Should build X and Y matrices correctly
     assert component.x(instances) == expected_x
@@ -217,13 +244,16 @@ def test_x_y_fit_predict_evaluate():
 
     # Should pass along X and Y matrices to classifiers
     component.fit(instances)
-    component.classifiers["type-a"].fit.assert_called_with(expected_x["type-a"], expected_y["type-a"])
-    component.classifiers["type-b"].fit.assert_called_with(expected_x["type-b"], expected_y["type-b"])
+    component.classifiers["type-a"].fit.assert_called_with(
+        expected_x["type-a"],
+        expected_y["type-a"],
+    )
+    component.classifiers["type-b"].fit.assert_called_with(
+        expected_x["type-b"],
+        expected_y["type-b"],
+    )
 
-    assert component.predict(expected_x) == {
-        "type-a": [[1]],
-        "type-b": [[0], [1]]
-    }
+    assert component.predict(expected_x) == {"type-a": [[1]], "type-b": [[0], [1]]}
 
     ev = component.evaluate(instances[1])
     assert ev["True positive"] == 1
@@ -40,7 +40,7 @@ class InstanceIterator:
 
 class Extractor(ABC):
     @abstractmethod
-    def extract(self, instances,):
+    def extract(self, instances):
         pass
 
     @staticmethod
@@ -61,9 +61,11 @@ class Extractor(ABC):
 class VariableFeaturesExtractor(Extractor):
     def extract(self, instances):
         result = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (vars)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (vars)",
+            disable=len(instances) < 5,
+        ):
             instance_features = instance.get_instance_features()
             var_split = self.split_variables(instance)
             for (category, var_index_pairs) in var_split.items():
@@ -71,9 +73,9 @@ class VariableFeaturesExtractor(Extractor):
                     result[category] = []
                 for (var_name, index) in var_index_pairs:
                     result[category] += [
-                        instance_features.tolist() + \
-                        instance.get_variable_features(var_name, index).tolist() + \
-                        [instance.lp_solution[var_name][index]]
+                        instance_features.tolist()
+                        + instance.get_variable_features(var_name, index).tolist()
+                        + [instance.lp_solution[var_name][index]]
                     ]
         for category in result:
             result[category] = np.array(result[category])
@@ -86,9 +88,11 @@ class SolutionExtractor(Extractor):
 
     def extract(self, instances):
         result = {}
-        for instance in tqdm(InstanceIterator(instances),
-                             desc="Extract (solution)",
-                             disable=len(instances) < 5):
+        for instance in tqdm(
+            InstanceIterator(instances),
+            desc="Extract (solution)",
+            disable=len(instances) < 5,
+        ):
             var_split = self.split_variables(instance)
             for (category, var_index_pairs) in var_split.items():
                 if category not in result:
@@ -109,13 +113,17 @@ class SolutionExtractor(Extractor):
 
 class InstanceFeaturesExtractor(Extractor):
     def extract(self, instances):
-        return np.vstack([
-            np.hstack([
-                instance.get_instance_features(),
-                instance.lp_value,
-            ])
-            for instance in InstanceIterator(instances)
-        ])
+        return np.vstack(
+            [
+                np.hstack(
+                    [
+                        instance.get_instance_features(),
+                        instance.lp_value,
+                    ]
+                )
+                for instance in InstanceIterator(instances)
+            ]
+        )
 
 
 class ObjectiveValueExtractor(Extractor):
@@ -125,11 +133,14 @@ class ObjectiveValueExtractor(Extractor):
 
     def extract(self, instances):
         if self.kind == "lower bound":
-            return np.array([[instance.lower_bound]
-                             for instance in InstanceIterator(instances)])
+            return np.array(
+                [[instance.lower_bound] for instance in InstanceIterator(instances)]
+            )
         if self.kind == "upper bound":
-            return np.array([[instance.upper_bound]
-                             for instance in InstanceIterator(instances)])
+            return np.array(
+                [[instance.upper_bound] for instance in InstanceIterator(instances)]
+            )
        if self.kind == "lp":
-            return np.array([[instance.lp_value]
-                             for instance in InstanceIterator(instances)])
+            return np.array(
+                [[instance.lp_value] for instance in InstanceIterator(instances)]
+            )
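Most hunks in this commit are instances of one rule: Black collapses a bracketed call onto a single line when it fits, and a trailing comma before the closing bracket forces the exploded one-element-per-line layout. A sketch of the rule with illustrative code (behavior as in recent Black releases; not code from this repository):

    def compute(alpha, beta, gamma):
        return alpha + beta + gamma

    # Fits within the line limit and has no trailing comma: Black keeps one line.
    result = compute(1, 2, 3)

    # The trailing comma keeps the call exploded, one argument per line,
    # even though it would fit on a single line.
    result = compute(
        1,
        2,
        3,
    )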
@@ -141,11 +141,11 @@ class Instance(ABC):
         pass
 
     def load(self, filename):
-        with gzip.GzipFile(filename, 'r') as f:
-            data = json.loads(f.read().decode('utf-8'))
+        with gzip.GzipFile(filename, "r") as f:
+            data = json.loads(f.read().decode("utf-8"))
         self.__dict__ = data
 
     def dump(self, filename):
-        data = json.dumps(self.__dict__, indent=2).encode('utf-8')
-        with gzip.GzipFile(filename, 'w') as f:
+        data = json.dumps(self.__dict__, indent=2).encode("utf-8")
+        with gzip.GzipFile(filename, "w") as f:
             f.write(data)
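The load/dump pair above round-trips an object's __dict__ through gzip-compressed JSON. A runnable sketch of the same pattern, using a hypothetical Point class in place of Instance:

    import gzip
    import json

    class Point:
        def __init__(self, x=0, y=0):
            self.x = x
            self.y = y

        def dump(self, filename):
            # Serialize all attributes to JSON, then gzip-compress to disk
            data = json.dumps(self.__dict__, indent=2).encode("utf-8")
            with gzip.GzipFile(filename, "w") as f:
                f.write(data)

        def load(self, filename):
            # Decompress and restore the attribute dictionary in place
            with gzip.GzipFile(filename, "r") as f:
                self.__dict__ = json.loads(f.read().decode("utf-8"))

    p = Point(3, 4)
    p.dump("/tmp/point.json.gz")
    q = Point()
    q.load("/tmp/point.json.gz")
    assert q.x == 3 and q.y == 4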
@@ -7,7 +7,8 @@ import logging
 import time
 import sys
 
-class TimeFormatter():
+
+class TimeFormatter:
     def __init__(self, start_time, log_colors):
         self.start_time = start_time
         self.log_colors = log_colors
@@ -19,21 +20,23 @@ class TimeFormatter():
             color = self.log_colors["yellow"]
         else:
             color = self.log_colors["green"]
-        return "%s[%12.3f]%s %s" % (color,
-                                    record.created - self.start_time,
-                                    self.log_colors["reset"],
-                                    record.getMessage())
+        return "%s[%12.3f]%s %s" % (
+            color,
+            record.created - self.start_time,
+            self.log_colors["reset"],
+            record.getMessage(),
+        )
 
-def setup_logger(start_time=None,
-                 force_color=False):
+
+def setup_logger(start_time=None, force_color=False):
     if start_time is None:
         start_time = time.time()
     if sys.stdout.isatty() or force_color:
         log_colors = {
-            "green": '\033[92m',
-            "yellow": '\033[93m',
-            "red": '\033[91m',
-            "reset": '\033[0m',
+            "green": "\033[92m",
+            "yellow": "\033[93m",
+            "red": "\033[91m",
+            "reset": "\033[0m",
         }
     else:
         log_colors = {
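The tuple that Black exploded here feeds a %-style format string: a color escape code, the elapsed time since start_time right-aligned in a 12-character field, a color reset, then the log message. A standalone sketch of the same formatting (the values are illustrative):

    import time

    log_colors = {"green": "\033[92m", "reset": "\033[0m"}
    start_time = time.time()

    # Same format string as TimeFormatter: colored elapsed-time stamp + message
    message = "%s[%12.3f]%s %s" % (
        log_colors["green"],
        time.time() - start_time,
        log_colors["reset"],
        "Solving MIP...",
    )
    print(message)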
@@ -17,21 +17,25 @@ class ChallengeA:
     - K = 500, u ~ U(0., 1.)
     - alpha = 0.25
     """
-    def __init__(self,
-                 seed=42,
-                 n_training_instances=500,
-                 n_test_instances=50):
+
+    def __init__(
+        self,
+        seed=42,
+        n_training_instances=500,
+        n_test_instances=50,
+    ):
 
         np.random.seed(seed)
-        self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
-                                          m=randint(low=10, high=11),
-                                          w=uniform(loc=0.0, scale=1000.0),
-                                          K=uniform(loc=500.0, scale=0.0),
-                                          u=uniform(loc=0.0, scale=1.0),
-                                          alpha=uniform(loc=0.25, scale=0.0),
-                                          fix_w=True,
-                                          w_jitter=uniform(loc=0.95, scale=0.1),
-                                          )
+        self.gen = MultiKnapsackGenerator(
+            n=randint(low=250, high=251),
+            m=randint(low=10, high=11),
+            w=uniform(loc=0.0, scale=1000.0),
+            K=uniform(loc=500.0, scale=0.0),
+            u=uniform(loc=0.0, scale=1.0),
+            alpha=uniform(loc=0.25, scale=0.0),
+            fix_w=True,
+            w_jitter=uniform(loc=0.95, scale=0.1),
+        )
         np.random.seed(seed + 1)
         self.training_instances = self.gen.generate(n_training_instances)
 
@@ -51,10 +55,7 @@ class MultiKnapsackInstance(Instance):
     same size and items don't shuffle around.
     """
 
-    def __init__(self,
-                 prices,
-                 capacities,
-                 weights):
+    def __init__(self, prices, capacities, weights):
         assert isinstance(prices, np.ndarray)
         assert isinstance(capacities, np.ndarray)
         assert isinstance(weights, np.ndarray)
@@ -69,44 +70,53 @@ class MultiKnapsackInstance(Instance):
     def to_model(self):
         model = pe.ConcreteModel()
         model.x = pe.Var(range(self.n), domain=pe.Binary)
-        model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
-                                                        for j in range(self.n)),
-                                 sense=pe.maximize)
+        model.OBJ = pe.Objective(
+            rule=lambda model: sum(model.x[j] * self.prices[j] for j in range(self.n)),
+            sense=pe.maximize,
+        )
         model.eq_capacity = pe.ConstraintList()
         for i in range(self.m):
-            model.eq_capacity.add(sum(model.x[j] * self.weights[i,j]
-                                      for j in range(self.n)) <= self.capacities[i])
+            model.eq_capacity.add(
+                sum(model.x[j] * self.weights[i, j] for j in range(self.n))
+                <= self.capacities[i]
+            )
 
         return model
 
     def get_instance_features(self):
-        return np.hstack([
-            np.mean(self.prices),
-            self.capacities,
-        ])
+        return np.hstack(
+            [
+                np.mean(self.prices),
+                self.capacities,
+            ]
+        )
 
     def get_variable_features(self, var, index):
-        return np.hstack([
-            self.prices[index],
-            self.weights[:, index],
-        ])
+        return np.hstack(
+            [
+                self.prices[index],
+                self.weights[:, index],
+            ]
+        )
 
 
 # def get_variable_category(self, var, index):
 #     return index
 
 
 class MultiKnapsackGenerator:
-    def __init__(self,
-                 n=randint(low=100, high=101),
-                 m=randint(low=30, high=31),
-                 w=randint(low=0, high=1000),
-                 K=randint(low=500, high=500),
-                 u=uniform(loc=0.0, scale=1.0),
-                 alpha=uniform(loc=0.25, scale=0.0),
-                 fix_w=False,
-                 w_jitter=uniform(loc=1.0, scale=0.0),
-                 round=True,
-                 ):
+    def __init__(
+        self,
+        n=randint(low=100, high=101),
+        m=randint(low=30, high=31),
+        w=randint(low=0, high=1000),
+        K=randint(low=500, high=500),
+        u=uniform(loc=0.0, scale=1.0),
+        alpha=uniform(loc=0.25, scale=0.0),
+        fix_w=False,
+        w_jitter=uniform(loc=1.0, scale=0.0),
+        round=True,
+    ):
         """Initialize the problem generator.
 
         Instances have a random number of items (or variables) and a random number of knapsacks
@@ -168,10 +178,13 @@ class MultiKnapsackGenerator:
         assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
         assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
         assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
-        assert isinstance(alpha, rv_frozen), "alpha should be a SciPy probability distribution"
+        assert isinstance(
+            alpha, rv_frozen
+        ), "alpha should be a SciPy probability distribution"
         assert isinstance(fix_w, bool), "fix_w should be boolean"
-        assert isinstance(w_jitter, rv_frozen), \
-            "w_jitter should be a SciPy probability distribution"
+        assert isinstance(
+            w_jitter, rv_frozen
+        ), "w_jitter should be a SciPy probability distribution"
 
         self.n = n
         self.m = m
@@ -211,13 +224,14 @@ class MultiKnapsackGenerator:
             K = self.K.rvs()
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
             alpha = self.alpha.rvs(m)
-            p = np.array([w[:,j].sum() / m + K * u[j] for j in range(n)])
-            b = np.array([w[i,:].sum() * alpha[i] for i in range(m)])
+            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
+            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
             if self.round:
                 p = p.round()
                 b = b.round()
                 w = w.round()
             return MultiKnapsackInstance(p, b, w)
+
         return [_sample() for _ in range(n_samples)]
 
 
@@ -225,6 +239,7 @@ class KnapsackInstance(Instance):
     """
     Simpler (one-dimensional) Knapsack Problem, used for testing.
     """
+
     def __init__(self, weights, prices, capacity):
         self.weights = weights
         self.prices = prices
@@ -234,23 +249,29 @@ class KnapsackInstance(Instance):
         model = pe.ConcreteModel()
         items = range(len(self.weights))
         model.x = pe.Var(items, domain=pe.Binary)
-        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.prices[v] for v in items),
-                                 sense=pe.maximize)
-        model.eq_capacity = pe.Constraint(expr=sum(model.x[v] * self.weights[v]
-                                                   for v in items) <= self.capacity)
+        model.OBJ = pe.Objective(
+            expr=sum(model.x[v] * self.prices[v] for v in items), sense=pe.maximize
+        )
+        model.eq_capacity = pe.Constraint(
+            expr=sum(model.x[v] * self.weights[v] for v in items) <= self.capacity
+        )
         return model
 
     def get_instance_features(self):
-        return np.array([
-            self.capacity,
-            np.average(self.weights),
-        ])
+        return np.array(
+            [
+                self.capacity,
+                np.average(self.weights),
+            ]
+        )
 
     def get_variable_features(self, var, index):
-        return np.array([
-            self.weights[index],
-            self.prices[index],
-        ])
+        return np.array(
+            [
+                self.weights[index],
+                self.prices[index],
+            ]
+        )
 
 
 class GurobiKnapsackInstance(KnapsackInstance):
@@ -258,6 +279,7 @@ class GurobiKnapsackInstance(KnapsackInstance):
     Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
     instead of Pyomo, used for testing.
     """
+
     def __init__(self, weights, prices, capacity):
         super().__init__(weights, prices, capacity)
 
@@ -268,9 +290,11 @@ class GurobiKnapsackInstance(KnapsackInstance):
         model = gp.Model("Knapsack")
         n = len(self.weights)
         x = model.addVars(n, vtype=GRB.BINARY, name="x")
-        model.addConstr(gp.quicksum(x[i] * self.weights[i]
-                                    for i in range(n)) <= self.capacity,
-                        "eq_capacity")
-        model.setObjective(gp.quicksum(x[i] * self.prices[i]
-                                       for i in range(n)), GRB.MAXIMIZE)
+        model.addConstr(
+            gp.quicksum(x[i] * self.weights[i] for i in range(n)) <= self.capacity,
+            "eq_capacity",
+        )
+        model.setObjective(
+            gp.quicksum(x[i] * self.prices[i] for i in range(n)), GRB.MAXIMIZE
+        )
         return model
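The isinstance assertions split across these hunks all test for rv_frozen: calling a SciPy distribution such as uniform or randint with parameters returns a frozen distribution carrying those parameters, which is what the generators expect. A small runnable sketch of what the checks accept:

    from scipy.stats import randint, uniform
    from scipy.stats.distributions import rv_frozen

    # Parameterizing a distribution freezes it
    w = uniform(loc=0.0, scale=1000.0)
    n = randint(low=100, high=101)
    assert isinstance(w, rv_frozen)
    assert isinstance(n, rv_frozen)

    # .rvs(k) draws k samples from the frozen distribution
    samples = w.rvs(5)
    assert len(samples) == 5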
@@ -12,17 +12,20 @@ from scipy.stats.distributions import rv_frozen
 
 
 class ChallengeA:
-    def __init__(self,
-                 seed=42,
-                 n_training_instances=500,
-                 n_test_instances=50,
-                 ):
+    def __init__(
+        self,
+        seed=42,
+        n_training_instances=500,
+        n_test_instances=50,
+    ):
 
         np.random.seed(seed)
-        self.generator = MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
-                                                     n=randint(low=200, high=201),
-                                                     p=uniform(loc=0.05, scale=0.0),
-                                                     fix_graph=True)
+        self.generator = MaxWeightStableSetGenerator(
+            w=uniform(loc=100.0, scale=50.0),
+            n=randint(low=200, high=201),
+            p=uniform(loc=0.05, scale=0.0),
+            fix_graph=True,
+        )
 
         np.random.seed(seed + 1)
         self.training_instances = self.generator.generate(n_training_instances)
@@ -43,11 +46,13 @@ class MaxWeightStableSetGenerator:
     parameters are sampled in the same way.
     """
 
-    def __init__(self,
-                 w=uniform(loc=10.0, scale=1.0),
-                 n=randint(low=250, high=251),
-                 p=uniform(loc=0.05, scale=0.0),
-                 fix_graph=True):
+    def __init__(
+        self,
+        w=uniform(loc=10.0, scale=1.0),
+        n=randint(low=250, high=251),
+        p=uniform(loc=0.05, scale=0.0),
+        fix_graph=True,
+    ):
         """Initialize the problem generator.
 
         Parameters
@@ -78,6 +83,7 @@ class MaxWeightStableSetGenerator:
             graph = self._generate_graph()
             weights = self.w.rvs(graph.number_of_nodes())
             return MaxWeightStableSetInstance(graph, weights)
+
         return [_sample() for _ in range(n_samples)]
 
     def _generate_graph(self):
@@ -102,8 +108,9 @@ class MaxWeightStableSetInstance(Instance):
         nodes = list(self.graph.nodes)
         model = pe.ConcreteModel()
         model.x = pe.Var(nodes, domain=pe.Binary)
-        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.weights[v] for v in nodes),
-                                 sense=pe.maximize)
+        model.OBJ = pe.Objective(
+            expr=sum(model.x[v] * self.weights[v] for v in nodes), sense=pe.maximize
+        )
         model.clique_eqs = pe.ConstraintList()
         for clique in nx.find_cliques(self.graph):
             model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
@@ -1,4 +1,3 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-
@@ -9,17 +9,18 @@ import numpy as np
 
 
 def test_knapsack_generator():
-    gen = MultiKnapsackGenerator(n=randint(low=100, high=101),
-                                 m=randint(low=30, high=31),
-                                 w=randint(low=0, high=1000),
-                                 K=randint(low=500, high=501),
-                                 u=uniform(loc=1.0, scale=1.0),
-                                 alpha=uniform(loc=0.50, scale=0.0),
-                                 )
+    gen = MultiKnapsackGenerator(
+        n=randint(low=100, high=101),
+        m=randint(low=30, high=31),
+        w=randint(low=0, high=1000),
+        K=randint(low=500, high=501),
+        u=uniform(loc=1.0, scale=1.0),
+        alpha=uniform(loc=0.50, scale=0.0),
+    )
     instances = gen.generate(100)
     w_sum = sum(instance.weights for instance in instances) / len(instances)
     p_sum = sum(instance.prices for instance in instances) / len(instances)
     b_sum = sum(instance.capacities for instance in instances) / len(instances)
-    assert round(np.mean(w_sum), -1) == 500.
+    assert round(np.mean(w_sum), -1) == 500.0
     # assert round(np.mean(p_sum), -1) == 1200. # flaky
-    assert round(np.mean(b_sum), -3) == 25000.
+    assert round(np.mean(b_sum), -3) == 25000.0
@@ -11,20 +11,23 @@ from scipy.stats import uniform, randint
 
 def test_stab():
     graph = nx.cycle_graph(5)
-    weights = [1., 1., 1., 1., 1.]
+    weights = [1.0, 1.0, 1.0, 1.0, 1.0]
     instance = MaxWeightStableSetInstance(graph, weights)
     solver = LearningSolver()
     solver.solve(instance)
-    assert instance.lower_bound == 2.
+    assert instance.lower_bound == 2.0
 
 
 def test_stab_generator_fixed_graph():
     np.random.seed(42)
     from miplearn.problems.stab import MaxWeightStableSetGenerator
-    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
-                                      n=randint(low=10, high=11),
-                                      p=uniform(loc=0.05, scale=0.),
-                                      fix_graph=True)
+
+    gen = MaxWeightStableSetGenerator(
+        w=uniform(loc=50.0, scale=10.0),
+        n=randint(low=10, high=11),
+        p=uniform(loc=0.05, scale=0.0),
+        fix_graph=True,
+    )
     instances = gen.generate(1_000)
     weights = np.array([instance.weights for instance in instances])
     weights_avg_actual = np.round(np.average(weights, axis=0))
@@ -35,12 +38,15 @@ def test_stab_generator_fixed_graph():
 def test_stab_generator_random_graph():
     np.random.seed(42)
     from miplearn.problems.stab import MaxWeightStableSetGenerator
-    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
-                                      n=randint(low=30, high=41),
-                                      p=uniform(loc=0.5, scale=0.),
-                                      fix_graph=False)
+
+    gen = MaxWeightStableSetGenerator(
+        w=uniform(loc=50.0, scale=10.0),
+        n=randint(low=30, high=41),
+        p=uniform(loc=0.5, scale=0.0),
+        fix_graph=False,
+    )
     instances = gen.generate(1_000)
     n_nodes = [instance.graph.number_of_nodes() for instance in instances]
     n_edges = [instance.graph.number_of_edges() for instance in instances]
-    assert np.round(np.mean(n_nodes)) == 35.
-    assert np.round(np.mean(n_edges), -1) == 300.
+    assert np.round(np.mean(n_nodes)) == 35.0
+    assert np.round(np.mean(n_edges), -1) == 300.0
@@ -11,11 +11,13 @@ from scipy.stats import uniform, randint
 
 
 def test_generator():
-    instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
-                                           y=uniform(loc=0.0, scale=1000.0),
-                                           n=randint(low=100, high=101),
-                                           gamma=uniform(loc=0.95, scale=0.1),
-                                           fix_cities=True).generate(100)
+    instances = TravelingSalesmanGenerator(
+        x=uniform(loc=0.0, scale=1000.0),
+        y=uniform(loc=0.0, scale=1000.0),
+        n=randint(low=100, high=101),
+        gamma=uniform(loc=0.95, scale=0.1),
+        fix_cities=True,
+    ).generate(100)
     assert len(instances) == 100
     assert instances[0].n_cities == 100
     assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
@@ -25,14 +27,16 @@ def test_generator():
 
 def test_instance():
     n_cities = 4
-    distances = np.array([
-        [0., 1., 2., 1.],
-        [1., 0., 1., 2.],
-        [2., 1., 0., 1.],
-        [1., 2., 1., 0.],
-    ])
+    distances = np.array(
+        [
+            [0.0, 1.0, 2.0, 1.0],
+            [1.0, 0.0, 1.0, 2.0],
+            [2.0, 1.0, 0.0, 1.0],
+            [1.0, 2.0, 1.0, 0.0],
+        ]
+    )
     instance = TravelingSalesmanInstance(n_cities, distances)
-    for solver_name in ['gurobi']:
+    for solver_name in ["gurobi"]:
         solver = LearningSolver(solver=solver_name)
         solver.solve(instance)
         x = instance.solution["x"]
@@ -48,17 +52,19 @@ def test_instance():
 
 def test_subtour():
     n_cities = 6
-    cities = np.array([
-        [0., 0.],
-        [1., 0.],
-        [2., 0.],
-        [3., 0.],
-        [0., 1.],
-        [3., 1.],
-    ])
+    cities = np.array(
+        [
+            [0.0, 0.0],
+            [1.0, 0.0],
+            [2.0, 0.0],
+            [3.0, 0.0],
+            [0.0, 1.0],
+            [3.0, 1.0],
+        ]
+    )
     distances = squareform(pdist(cities))
     instance = TravelingSalesmanInstance(n_cities, distances)
-    for solver_name in ['gurobi']:
+    for solver_name in ["gurobi"]:
         solver = LearningSolver(solver=solver_name)
         solver.solve(instance)
         assert hasattr(instance, "found_violated_lazy_constraints")
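test_subtour builds its distance matrix with squareform(pdist(cities)): pdist computes the condensed pairwise Euclidean distances and squareform expands them into a symmetric matrix with a zero diagonal. A runnable sketch with made-up coordinates:

    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    cities = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
    distances = squareform(pdist(cities))

    assert distances.shape == (3, 3)
    assert distances[0, 1] == 5.0  # 3-4-5 triangle
    assert np.allclose(distances, distances.T)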
@@ -13,20 +13,22 @@ import random
 
 
 class ChallengeA:
-    def __init__(self,
-                 seed=42,
-                 n_training_instances=500,
-                 n_test_instances=50,
-                 ):
+    def __init__(
+        self,
+        seed=42,
+        n_training_instances=500,
+        n_test_instances=50,
+    ):
 
         np.random.seed(seed)
-        self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
-                                                    y=uniform(loc=0.0, scale=1000.0),
-                                                    n=randint(low=350, high=351),
-                                                    gamma=uniform(loc=0.95, scale=0.1),
-                                                    fix_cities=True,
-                                                    round=True,
-                                                    )
+        self.generator = TravelingSalesmanGenerator(
+            x=uniform(loc=0.0, scale=1000.0),
+            y=uniform(loc=0.0, scale=1000.0),
+            n=randint(low=350, high=351),
+            gamma=uniform(loc=0.95, scale=0.1),
+            fix_cities=True,
+            round=True,
+        )
 
         np.random.seed(seed + 1)
         self.training_instances = self.generator.generate(n_training_instances)
@@ -38,14 +40,15 @@ class ChallengeA:
 class TravelingSalesmanGenerator:
     """Random generator for the Traveling Salesman Problem."""
 
-    def __init__(self,
-                 x=uniform(loc=0.0, scale=1000.0),
-                 y=uniform(loc=0.0, scale=1000.0),
-                 n=randint(low=100, high=101),
-                 gamma=uniform(loc=1.0, scale=0.0),
-                 fix_cities=True,
-                 round=True,
-                 ):
+    def __init__(
+        self,
+        x=uniform(loc=0.0, scale=1000.0),
+        y=uniform(loc=0.0, scale=1000.0),
+        n=randint(low=100, high=101),
+        gamma=uniform(loc=1.0, scale=0.0),
+        fix_cities=True,
+        round=True,
+    ):
         """Initializes the problem generator.
 
         Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n, x_i and y_i are
@@ -79,7 +82,10 @@ class TravelingSalesmanGenerator:
         assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
         assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
         assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
-        assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution"
+        assert isinstance(
+            gamma,
+            rv_frozen,
+        ), "gamma should be a SciPy probability distribution"
         self.x = x
         self.y = y
         self.n = n
@@ -103,6 +109,7 @@ class TravelingSalesmanGenerator:
             if self.round:
                 distances = distances.round()
             return TravelingSalesmanInstance(n, distances)
+
         return [_sample() for _ in range(n_samples)]
 
     def _generate_cities(self):
@@ -128,18 +135,25 @@ class TravelingSalesmanInstance(Instance):
 
     def to_model(self):
         model = pe.ConcreteModel()
-        model.edges = edges = [(i,j)
-                               for i in range(self.n_cities)
-                               for j in range(i+1, self.n_cities)]
+        model.edges = edges = [
+            (i, j) for i in range(self.n_cities) for j in range(i + 1, self.n_cities)
+        ]
         model.x = pe.Var(edges, domain=pe.Binary)
-        model.obj = pe.Objective(expr=sum(model.x[i,j] * self.distances[i,j]
-                                          for (i,j) in edges),
-                                 sense=pe.minimize)
+        model.obj = pe.Objective(
+            expr=sum(model.x[i, j] * self.distances[i, j] for (i, j) in edges),
+            sense=pe.minimize,
+        )
         model.eq_degree = pe.ConstraintList()
         model.eq_subtour = pe.ConstraintList()
         for i in range(self.n_cities):
-            model.eq_degree.add(sum(model.x[min(i,j), max(i,j)]
-                                    for j in range(self.n_cities) if i != j) == 2)
+            model.eq_degree.add(
+                sum(
+                    model.x[min(i, j), max(i, j)]
+                    for j in range(self.n_cities)
+                    if i != j
+                )
+                == 2
+            )
         return model
 
     def get_instance_features(self):
@@ -163,9 +177,12 @@ class TravelingSalesmanInstance(Instance):
         return violations
 
     def build_lazy_constraint(self, model, component):
-        cut_edges = [e for e in model.edges
-                     if (e[0] in component and e[1] not in component) or
-                        (e[0] not in component and e[1] in component)]
+        cut_edges = [
+            e
+            for e in model.edges
+            if (e[0] in component and e[1] not in component)
+            or (e[0] not in component and e[1] in component)
+        ]
         return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)
 
     def find_violated_user_cuts(self, model):
@@ -13,10 +13,11 @@ logger = logging.getLogger(__name__)
 
 
 class GurobiSolver(InternalSolver):
-    def __init__(self,
-                 params=None,
-                 lazy_cb_frequency=1,
-                 ):
+    def __init__(
+        self,
+        params=None,
+        lazy_cb_frequency=1,
+    ):
         """
         An InternalSolver backed by Gurobi's Python API (without Pyomo).
 
@@ -33,6 +34,7 @@ class GurobiSolver(InternalSolver):
         if params is None:
             params = {}
         from gurobipy import GRB
+
         self.GRB = GRB
         self.instance = None
         self.model = None
@@ -44,8 +46,7 @@ class GurobiSolver(InternalSolver):
         if lazy_cb_frequency == 1:
             self.lazy_cb_where = [self.GRB.Callback.MIPSOL]
         else:
-            self.lazy_cb_where = [self.GRB.Callback.MIPSOL,
-                                  self.GRB.Callback.MIPNODE]
+            self.lazy_cb_where = [self.GRB.Callback.MIPSOL, self.GRB.Callback.MIPNODE]
 
     def set_instance(self, instance, model=None):
         self._raise_if_callback()
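lazy_cb_where lists the Gurobi callback codes at which lazy constraints are checked: only at new incumbents (MIPSOL), or additionally at every node (MIPNODE). A hedged sketch of how a callback typically dispatches on these codes in the gurobipy API (the function body is illustrative, not this class's internal wrapper):

from gurobipy import GRB

def lazy_cb(cb_model, cb_where):
    if cb_where == GRB.Callback.MIPSOL:
        # Query the candidate incumbent and add violated lazy constraints.
        values = cb_model.cbGetSolution(cb_model.getVars())
        # ...find violations, then: cb_model.cbLazy(lhs <= rhs)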
@@ -70,14 +71,15 @@ class GurobiSolver(InternalSolver):
                 idx = [0]
             else:
                 name = m.group(1)
-                idx = tuple(int(k) if k.isdecimal() else k
-                            for k in m.group(2).split(","))
+                idx = tuple(
+                    int(k) if k.isdecimal() else k for k in m.group(2).split(",")
+                )
                 if len(idx) == 1:
                     idx = idx[0]
             if name not in self._all_vars:
                 self._all_vars[name] = {}
             self._all_vars[name][idx] = var
-            if var.vtype != 'C':
+            if var.vtype != "C":
                 if name not in self._bin_vars:
                     self._bin_vars[name] = {}
                 self._bin_vars[name][idx] = var
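The tuple comprehension above rebuilds a variable's index from its textual name, so that x[0,east] is stored under ("x", (0, "east")). A standalone sketch of the same parsing step; the regular expression here is an assumption, since the diff only shows the m.group calls:

import re

m = re.match(r"([^\[]*)\[(.*)\]", "x[0,east]")  # pattern assumed
name = m.group(1)
idx = tuple(int(k) if k.isdecimal() else k for k in m.group(2).split(","))
assert (name, idx) == ("x", (0, "east"))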
@@ -103,15 +105,9 @@ class GurobiSolver(InternalSolver):
         for (idx, var) in vardict.items():
             var.vtype = self.GRB.BINARY
         log = streams[0].getvalue()
-        return {
-            "Optimal value": self.model.objVal,
-            "Log": log
-        }
+        return {"Optimal value": self.model.objVal, "Log": log}
 
-    def solve(self,
-              tee=False,
-              iteration_cb=None,
-              lazy_cb=None):
+    def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
         self._raise_if_callback()
 
         def cb_wrapper(cb_model, cb_where):
@@ -133,7 +129,7 @@ class GurobiSolver(InternalSolver):
         if tee:
             streams += [sys.stdout]
         if iteration_cb is None:
-            iteration_cb = lambda : False
+            iteration_cb = lambda: False
         while True:
             logger.debug("Solving MIP...")
             with RedirectOutput(streams):
@@ -187,7 +183,9 @@ class GurobiSolver(InternalSolver):
         elif self.cb_where is None:
             return var.x
         else:
-            raise Exception("get_value cannot be called from cb_where=%s" % self.cb_where)
+            raise Exception(
+                "get_value cannot be called from cb_where=%s" % self.cb_where
+            )
 
     def get_variables(self):
         self._raise_if_callback()
@@ -220,8 +218,10 @@ class GurobiSolver(InternalSolver):
             if value is not None:
                 count_fixed += 1
                 self._all_vars[varname][idx].start = value
-        logger.info("Setting start values for %d variables (out of %d)" %
-                    (count_fixed, count_total))
+        logger.info(
+            "Setting start values for %d variables (out of %d)"
+            % (count_fixed, count_total)
+        )
 
     def clear_warm_start(self):
         self._raise_if_callback()
@@ -248,10 +248,7 @@ class GurobiSolver(InternalSolver):
     def extract_constraint(self, cid):
         self._raise_if_callback()
         constr = self.model.getConstrByName(cid)
-        cobj = (self.model.getRow(constr),
-                constr.sense,
-                constr.RHS,
-                constr.ConstrName)
+        cobj = (self.model.getRow(constr), constr.sense, constr.RHS, constr.ConstrName)
         self.model.remove(constr)
         return cobj
 
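The cobj tuple condensed above captures row, sense, right-hand side, and name: everything needed to re-add the constraint after it has been removed. A hedged gurobipy round-trip sketch (requires a Gurobi installation; addLConstr is the gurobipy call that accepts exactly this (expr, sense, rhs, name) form):

from gurobipy import Model

m = Model()
x = m.addVar(name="x")
y = m.addVar(name="y")
m.addConstr(x + y <= 1, name="c1")
m.update()

constr = m.getConstrByName("c1")
cobj = (m.getRow(constr), constr.sense, constr.RHS, constr.ConstrName)
m.remove(constr)
m.addLConstr(*cobj)  # restore the extracted row later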
@@ -324,6 +321,7 @@ class GurobiSolver(InternalSolver):
 
     def __setstate__(self, state):
         from gurobipy import GRB
+
         self.params = state["params"]
         self.lazy_cb_where = state["lazy_cb_where"]
         self.GRB = GRB
@@ -222,4 +222,3 @@ class InternalSolver(ABC):
         for idx in indices:
             solution[var][idx] = 0.0
         return solution
-
@@ -12,10 +12,12 @@ from copy import deepcopy
 from typing import Optional, List
 from p_tqdm import p_map
 
-from .. import (ObjectiveValueComponent,
-                PrimalSolutionComponent,
-                DynamicLazyConstraintsComponent,
-                UserCutsComponent)
+from .. import (
+    ObjectiveValueComponent,
+    PrimalSolutionComponent,
+    DynamicLazyConstraintsComponent,
+    UserCutsComponent,
+)
 from .pyomo.cplex import CplexPyomoSolver
 from .pyomo.gurobi import GurobiPyomoSolver
 
@@ -43,16 +45,18 @@ def _parallel_solve(idx):
 
 
 class LearningSolver:
-    def __init__(self,
-                 components=None,
-                 gap_tolerance=1e-4,
-                 mode="exact",
-                 solver="gurobi",
-                 threads=None,
-                 time_limit=None,
-                 node_limit=None,
-                 solve_lp_first=True,
-                 use_lazy_cb=False):
+    def __init__(
+        self,
+        components=None,
+        gap_tolerance=1e-4,
+        mode="exact",
+        solver="gurobi",
+        threads=None,
+        time_limit=None,
+        node_limit=None,
+        solve_lp_first=True,
+        use_lazy_cb=False,
+    ):
         """
         Mixed-Integer Linear Programming (MIP) solver that extracts information
         from previous runs and uses Machine Learning methods to accelerate the
@@ -142,11 +146,13 @@ class LearningSolver:
             solver.set_node_limit(self.node_limit)
         return solver
 
-    def solve(self,
-              instance,
-              model=None,
-              output="",
-              tee=False):
+    def solve(
+        self,
+        instance,
+        model=None,
+        output="",
+        tee=False,
+    ):
         """
         Solves the given instance. If trained machine-learning models are
         available, they will be used to accelerate the solution process.
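A short usage sketch matching the signatures reformatted above; the knapsack data mirrors the test instances appearing later in this diff, and the fit call follows the project's usual solve-then-train flow (a sketch, not new API):

from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance

instance = KnapsackInstance(
    weights=[23.0, 26.0, 20.0, 18.0],
    prices=[505.0, 352.0, 458.0, 220.0],
    capacity=67.0,
)
solver = LearningSolver(time_limit=300, gap_tolerance=1e-3)
solver.solve(instance)
solver.fit([instance])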
@@ -248,9 +254,11 @@ class LearningSolver:
             lazy_cb = lazy_cb_wrapper
 
         logger.info("Solving MILP...")
-        results = self.internal_solver.solve(tee=tee,
-                                             iteration_cb=iteration_cb,
-                                             lazy_cb=lazy_cb)
+        results = self.internal_solver.solve(
+            tee=tee,
+            iteration_cb=iteration_cb,
+            lazy_cb=lazy_cb,
+        )
         results["LP value"] = instance.lp_value
 
         # Read MIP solution and bounds
@@ -306,10 +314,12 @@ class LearningSolver:
         SOLVER[0] = self
         OUTPUTS[0] = output
         INSTANCES[0] = instances
-        results = p_map(_parallel_solve,
-                        list(range(len(instances))),
-                        num_cpus=n_jobs,
-                        desc=label)
+        results = p_map(
+            _parallel_solve,
+            list(range(len(instances))),
+            num_cpus=n_jobs,
+            desc=label,
+        )
         stats = []
         for (idx, (s, instance)) in enumerate(results):
             stats.append(s)
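p_map, as called above, is p_tqdm's multiprocessing map with a progress bar; num_cpus and desc are forwarded exactly as shown. A minimal standalone sketch:

from p_tqdm import p_map

def _square(i):
    return i * i

results = p_map(_square, list(range(8)), num_cpus=2, desc="Solve")
assert results == [0, 1, 4, 9, 16, 25, 36, 49]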
@@ -81,8 +81,10 @@ class BasePyomoSolver(InternalSolver):
                 count_fixed += 1
         if count_fixed > 0:
             self._is_warm_start_available = True
-        logger.info("Setting start values for %d variables (out of %d)" %
-                    (count_fixed, count_total))
+        logger.info(
+            "Setting start values for %d variables (out of %d)"
+            % (count_fixed, count_total)
+        )
 
     def clear_warm_start(self):
         for var in self._all_vars:
@@ -134,17 +136,19 @@ class BasePyomoSolver(InternalSolver):
                 count_fixed += 1
                 var[index].fix(solution[varname][index])
                 self._pyomo_solver.update_var(var[index])
-        logger.info("Fixing values for %d variables (out of %d)" %
-                    (count_fixed, count_total))
+        logger.info(
+            "Fixing values for %d variables (out of %d)"
+            % (
+                count_fixed,
+                count_total,
+            )
+        )
 
     def add_constraint(self, constraint):
         self._pyomo_solver.add_constraint(constraint)
         self._update_constrs()
 
-    def solve(self,
-              tee=False,
-              iteration_cb=None,
-              lazy_cb=None):
+    def solve(self, tee=False, iteration_cb=None, lazy_cb=None):
         if lazy_cb is not None:
             raise Exception("lazy callback not supported")
         total_wallclock_time = 0
@@ -158,8 +162,10 @@ class BasePyomoSolver(InternalSolver):
         while True:
             logger.debug("Solving MIP...")
             with RedirectOutput(streams):
-                results = self._pyomo_solver.solve(tee=True,
-                                                   warmstart=self._is_warm_start_available)
+                results = self._pyomo_solver.solve(
+                    tee=True,
+                    warmstart=self._is_warm_start_available,
+                )
             total_wallclock_time += results["Solver"][0]["Wallclock time"]
             should_repeat = iteration_cb()
             if not should_repeat:
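The call reformatted above uses Pyomo's persistent-solver interface: the model is attached once with set_instance and solve may then be invoked repeatedly, passing warmstart only when start values have been loaded. A hedged sketch of the same pattern (assumes a licensed gurobi_persistent backend; tiny model for illustration):

import pyomo.environ as pe

model = pe.ConcreteModel()
model.x = pe.Var(domain=pe.Binary)
model.obj = pe.Objective(expr=model.x, sense=pe.maximize)

opt = pe.SolverFactory("gurobi_persistent")
opt.set_instance(model)
results = opt.solve(tee=False, warmstart=False)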
@@ -192,9 +198,7 @@ class BasePyomoSolver(InternalSolver):
         return value
 
     def _extract_node_count(self, log):
-        return int(self.__extract(log,
-                                  self._get_node_count_regexp(),
-                                  default=1))
+        return int(self.__extract(log, self._get_node_count_regexp(), default=1))
 
     def set_threads(self, threads):
         key = self._get_threads_option_name()
@@ -20,7 +20,7 @@ class CplexPyomoSolver(BasePyomoSolver):
         {"mip_display": 5} to increase the log verbosity.
         """
         super().__init__()
-        self._pyomo_solver = pe.SolverFactory('cplex_persistent')
+        self._pyomo_solver = pe.SolverFactory("cplex_persistent")
         self._pyomo_solver.options["randomseed"] = randint(low=0, high=1000).rvs()
         self._pyomo_solver.options["mip_display"] = 4
         if options is not None:
@@ -15,8 +15,7 @@ logger = logging.getLogger(__name__)
 
 
 class GurobiPyomoSolver(BasePyomoSolver):
-    def __init__(self,
-                 options=None):
+    def __init__(self, options=None):
         """
         Creates a new Gurobi solver, accessed through Pyomo.
 
@@ -27,7 +26,7 @@ class GurobiPyomoSolver(BasePyomoSolver):
         {"Threads": 4} to set the number of threads.
         """
         super().__init__()
-        self._pyomo_solver = pe.SolverFactory('gurobi_persistent')
+        self._pyomo_solver = pe.SolverFactory("gurobi_persistent")
         self._pyomo_solver.options["Seed"] = randint(low=0, high=1000).rvs()
         if options is not None:
             for (key, value) in options.items():
@@ -56,6 +55,7 @@ class GurobiPyomoSolver(BasePyomoSolver):
 
     def set_branching_priorities(self, priorities):
         from gurobipy import GRB
+
         for varname in priorities.keys():
             var = self._varname_to_var[varname]
             for (index, priority) in priorities[varname].items():
@@ -9,20 +9,22 @@ from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
 
 def _get_instance(solver):
     def _is_subclass_or_instance(solver, parentClass):
-        return isinstance(solver, parentClass) or (isclass(solver) and issubclass(solver, parentClass))
+        return isinstance(solver, parentClass) or (
+            isclass(solver) and issubclass(solver, parentClass)
+        )
 
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return KnapsackInstance(
-            weights=[23., 26., 20., 18.],
-            prices=[505., 352., 458., 220.],
-            capacity=67.,
+            weights=[23.0, 26.0, 20.0, 18.0],
+            prices=[505.0, 352.0, 458.0, 220.0],
+            capacity=67.0,
         )
 
     if _is_subclass_or_instance(solver, GurobiSolver):
         return GurobiKnapsackInstance(
-            weights=[23., 26., 20., 18.],
-            prices=[505., 352., 458., 220.],
-            capacity=67.,
+            weights=[23.0, 26.0, 20.0, 18.0],
+            prices=[505.0, 352.0, 458.0, 220.0],
+            capacity=67.0,
         )
 
     assert False
@@ -16,6 +16,7 @@ logger = logging.getLogger(__name__)
 
 def test_redirect_output():
     import sys
+
     original_stdout = sys.stdout
     io = StringIO()
     with RedirectOutput([io]):
@@ -31,36 +32,42 @@ def test_internal_solver_warm_starts():
         model = instance.to_model()
         solver = solver_class()
         solver.set_instance(instance, model)
-        solver.set_warm_start({
-            "x": {
-                0: 1.0,
-                1: 0.0,
-                2: 0.0,
-                3: 1.0,
-            }
-        })
+        solver.set_warm_start(
+            {
+                "x": {
+                    0: 1.0,
+                    1: 0.0,
+                    2: 0.0,
+                    3: 1.0,
+                }
+            }
+        )
         stats = solver.solve(tee=True)
         assert stats["Warm start value"] == 725.0
 
-        solver.set_warm_start({
-            "x": {
-                0: 1.0,
-                1: 1.0,
-                2: 1.0,
-                3: 1.0,
-            }
-        })
+        solver.set_warm_start(
+            {
+                "x": {
+                    0: 1.0,
+                    1: 1.0,
+                    2: 1.0,
+                    3: 1.0,
+                }
+            }
+        )
         stats = solver.solve(tee=True)
         assert stats["Warm start value"] is None
 
-        solver.fix({
-            "x": {
-                0: 1.0,
-                1: 0.0,
-                2: 0.0,
-                3: 1.0,
-            }
-        })
+        solver.fix(
+            {
+                "x": {
+                    0: 1.0,
+                    1: 0.0,
+                    2: 0.0,
+                    3: 1.0,
+                }
+            }
+        )
         stats = solver.solve(tee=True)
         assert stats["Lower bound"] == 725.0
         assert stats["Upper bound"] == 725.0
@@ -20,11 +20,13 @@ def test_learning_solver():
     for internal_solver in _get_internal_solvers():
         logger.info("Solver: %s" % internal_solver)
         instance = _get_instance(internal_solver)
-        solver = LearningSolver(time_limit=300,
-                                gap_tolerance=1e-3,
-                                threads=1,
-                                solver=internal_solver,
-                                mode=mode)
+        solver = LearningSolver(
+            time_limit=300,
+            gap_tolerance=1e-3,
+            threads=1,
+            solver=internal_solver,
+            mode=mode,
+        )
 
         solver.solve(instance)
         assert instance.solution["x"][0] == 1.0
@@ -74,8 +76,7 @@ def test_solve_fit_from_disk():
         filenames = []
         for k in range(3):
             instance = _get_instance(internal_solver)
-            with tempfile.NamedTemporaryFile(suffix=".pkl",
-                                             delete=False) as file:
+            with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as file:
                 filenames += [file.name]
                 pickle.dump(instance, file)
 
@@ -8,14 +8,14 @@ from miplearn.problems.knapsack import KnapsackInstance
 def get_test_pyomo_instances():
     instances = [
         KnapsackInstance(
-            weights=[23., 26., 20., 18.],
-            prices=[505., 352., 458., 220.],
-            capacity=67.,
+            weights=[23.0, 26.0, 20.0, 18.0],
+            prices=[505.0, 352.0, 458.0, 220.0],
+            capacity=67.0,
         ),
         KnapsackInstance(
-            weights=[25., 30., 22., 18.],
-            prices=[500., 365., 420., 150.],
-            capacity=70.,
+            weights=[25.0, 30.0, 22.0, 18.0],
+            prices=[500.0, 365.0, 420.0, 150.0],
+            capacity=70.0,
         ),
     ]
     models = [instance.to_model() for instance in instances]
@@ -11,8 +11,9 @@ from scipy.stats import randint
 
 def test_benchmark():
     # Generate training and test instances
-    train_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(5)
-    test_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(3)
+    generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
+    train_instances = generator.generate(5)
+    test_instances = generator.generate(3)
 
     # Training phase...
     training_solver = LearningSolver()
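Note that this hunk is a small behavior change rather than pure reformatting: one generator object is now shared, so if the generator fixes random structure at construction time (for example a fixed graph), the training and test instances are drawn from the same structure instead of two independent ones. A sketch of the new pattern (the import path is an assumption):

from scipy.stats import randint
from miplearn.problems.stab import MaxWeightStableSetGenerator  # path assumed

generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
train_instances = generator.generate(5)
test_instances = generator.generate(3)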
@@ -26,11 +27,11 @@ def test_benchmark():
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.fit(train_instances)
     benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.raw_results().values.shape == (12,16)
+    assert benchmark.raw_results().values.shape == (12, 16)
 
     benchmark.save_results("/tmp/benchmark.csv")
     assert os.path.isfile("/tmp/benchmark.csv")
 
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (12,16)
+    assert benchmark.raw_results().values.shape == (12, 16)
@@ -3,25 +3,28 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from miplearn.problems.knapsack import KnapsackInstance
-from miplearn import (LearningSolver,
-                      SolutionExtractor,
-                      InstanceFeaturesExtractor,
-                      VariableFeaturesExtractor,
-                      )
+from miplearn import (
+    LearningSolver,
+    SolutionExtractor,
+    InstanceFeaturesExtractor,
+    VariableFeaturesExtractor,
+)
 import numpy as np
 import pyomo.environ as pe
 
 
 def _get_instances():
     instances = [
-        KnapsackInstance(weights=[1., 2., 3.],
-                         prices=[10., 20., 30.],
-                         capacity=2.5,
-                         ),
-        KnapsackInstance(weights=[3., 4., 5.],
-                         prices=[20., 30., 40.],
-                         capacity=4.5,
-                         ),
+        KnapsackInstance(
+            weights=[1.0, 2.0, 3.0],
+            prices=[10.0, 20.0, 30.0],
+            capacity=2.5,
+        ),
+        KnapsackInstance(
+            weights=[3.0, 4.0, 5.0],
+            prices=[20.0, 30.0, 40.0],
+            capacity=4.5,
+        ),
     ]
     models = [instance.to_model() for instance in instances]
     solver = LearningSolver()
@@ -38,19 +41,25 @@ def test_solution_extractor():
     assert isinstance(features["default"], np.ndarray)
     assert features["default"].shape == (6, 2)
     assert features["default"].ravel().tolist() == [
-        1., 0.,
-        0., 1.,
-        1., 0.,
-        1., 0.,
-        0., 1.,
-        1., 0.,
+        1.0,
+        0.0,
+        0.0,
+        1.0,
+        1.0,
+        0.0,
+        1.0,
+        0.0,
+        0.0,
+        1.0,
+        1.0,
+        0.0,
     ]
 
 
 def test_instance_features_extractor():
     instances, models = _get_instances()
     features = InstanceFeaturesExtractor().extract(instances)
-    assert features.shape == (2,3)
+    assert features.shape == (2, 3)
 
 
 def test_variable_features_extractor():
@@ -58,5 +67,4 @@ def test_variable_features_extractor():
     features = VariableFeaturesExtractor().extract(instances)
     assert isinstance(features, dict)
     assert "default" in features
-    assert features["default"].shape == (6,5)
+    assert features["default"].shape == (6, 5)
-
3
pyproject.toml
Normal file
@@ -0,0 +1,3 @@
+[tool.black]
+py36 = true
+include = '\.pyi?$'
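The new pyproject.toml targets Python 3.6 syntax and limits Black to .py/.pyi files. A hedged sketch of the reformatting this enables, via Black's Python API (format_str and FileMode exist in the pinned release; the sample source is illustrative):

import black

src = "def solve(self,\n          tee=False,\n          lazy_cb=None):\n    pass\n"
print(black.format_str(src, mode=black.FileMode()))
# def solve(self, tee=False, lazy_cb=None):
#     pass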
@@ -12,3 +12,5 @@ python-markdown-math~=0.8
 seaborn~=0.11
 scikit-learn~=0.23
 tqdm~=4.54
+black==20.8b1
+pre-commit~=2.9