Mirror of https://github.com/ANL-CEEESA/MIPLearn.git, synced 2025-12-06 09:28:51 -06:00
Move tests to separate folder
26 tests/__init__.py Normal file
@@ -0,0 +1,26 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.problems.knapsack import KnapsackInstance
from miplearn.solvers.learning import LearningSolver


def get_test_pyomo_instances():
    instances = [
        KnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        ),
        KnapsackInstance(
            weights=[25.0, 30.0, 22.0, 18.0],
            prices=[500.0, 365.0, 420.0, 150.0],
            capacity=70.0,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for i in range(len(instances)):
        solver.solve(instances[i], models[i])
    return instances, models
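The helper above is the shared fixture for the new test packages; sibling modules consume it through a relative import, as test_lazy_dynamic.py, test_objective.py and test_primal.py below do:

    from .. import get_test_pyomo_instances

    instances, models = get_test_pyomo_instances()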
3 tests/classifiers/__init__.py Normal file
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
18 tests/classifiers/test_counting.py Normal file
@@ -0,0 +1,18 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from numpy.linalg import norm

from miplearn.classifiers.counting import CountingClassifier

E = 0.1


def test_counting():
    clf = CountingClassifier()
    clf.fit(np.zeros((8, 25)), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    # Three of the eight training labels are 0 and five are 1, so the
    # counting classifier should predict [3/8, 5/8] = [0.375, 0.625]
    # regardless of the input features.
    expected_proba = np.array([[0.375, 0.625], [0.375, 0.625]])
    actual_proba = clf.predict_proba(np.zeros((2, 25)))
    assert norm(actual_proba - expected_proba) < E
49 tests/classifiers/test_cv.py Normal file
@@ -0,0 +1,49 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from numpy.linalg import norm
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

from miplearn.classifiers.cv import CrossValidatedClassifier

E = 0.1


def test_cv():
    # Training set: label is true if the point is inside a 2D circle
    x_train = np.array([[x1, x2] for x1 in range(-10, 11) for x2 in range(-10, 11)])
    x_train = StandardScaler().fit_transform(x_train)
    n_samples = x_train.shape[0]

    y_train = np.array(
        [
            1.0 if x1 * x1 + x2 * x2 <= 100 else 0.0
            for x1 in range(-10, 11)
            for x2 in range(-10, 11)
        ]
    )

    # Support vector machines with linear kernels do not perform well on this
    # data set, so the predictor should return the given constant instead.
    clf = CrossValidatedClassifier(
        classifier=SVC(probability=True, random_state=42),
        threshold=0.90,
        constant=0.0,
        cv=30,
    )
    clf.fit(x_train, y_train)
    assert norm(np.zeros(n_samples) - clf.predict(x_train)) < E

    # Support vector machines with quadratic kernels perform almost perfectly
    # on this data set, so the predictor should return their prediction.
    clf = CrossValidatedClassifier(
        classifier=SVC(probability=True, kernel="poly", degree=2, random_state=42),
        threshold=0.90,
        cv=30,
    )
    clf.fit(x_train, y_train)
    assert norm(y_train - clf.predict(x_train)) < E
20 tests/classifiers/test_evaluator.py Normal file
@@ -0,0 +1,20 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

from miplearn.classifiers.evaluator import ClassifierEvaluator


def test_evaluator():
    clf_a = KNeighborsClassifier(n_neighbors=1)
    clf_b = KNeighborsClassifier(n_neighbors=2)
    x_train = np.array([[0, 0], [1, 0]])
    y_train = np.array([0, 1])
    clf_a.fit(x_train, y_train)
    clf_b.fit(x_train, y_train)
    ev = ClassifierEvaluator()
    # clf_a (k=1) classifies each training point by its own label, so it is
    # always correct on the training set; clf_b (k=2) lets both points vote
    # on every query, and the tie resolves to class 0, so only one of the
    # two points is classified correctly.
    assert ev.evaluate(clf_a, x_train, y_train) == 1.0
    assert ev.evaluate(clf_b, x_train, y_train) == 0.5
38 tests/classifiers/test_threshold.py Normal file
@@ -0,0 +1,38 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np

from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import MinPrecisionThreshold


def test_threshold_dynamic():
    clf = Mock(spec=Classifier)
    clf.predict_proba = Mock(
        return_value=np.array(
            [
                [0.10, 0.90],
                [0.10, 0.90],
                [0.20, 0.80],
                [0.30, 0.70],
            ]
        )
    )
    x_train = np.array([0, 1, 2, 3])
    y_train = np.array([1, 1, 0, 0])

    # For class 1, the predicted probabilities are [0.90, 0.90, 0.80, 0.70]
    # and the labels are [1, 1, 0, 0]; the expected threshold is the lowest
    # cutoff that still achieves the requested minimum precision.
    threshold = MinPrecisionThreshold(min_precision=1.0)
    assert threshold.find(clf, x_train, y_train) == 0.90

    threshold = MinPrecisionThreshold(min_precision=0.65)
    assert threshold.find(clf, x_train, y_train) == 0.80

    threshold = MinPrecisionThreshold(min_precision=0.50)
    assert threshold.find(clf, x_train, y_train) == 0.70

    threshold = MinPrecisionThreshold(min_precision=0.00)
    assert threshold.find(clf, x_train, y_train) == 0.70
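A minimal consumption sketch for the cutoff found above (hypothetical, not part of this commit): a component would mark as positive only the samples whose class-1 probability meets the threshold.

    t = MinPrecisionThreshold(min_precision=0.90).find(clf, x_train, y_train)
    # With the mocked probabilities, t == 0.90 and y_pred == [True, True, False, False]
    y_pred = clf.predict_proba(x_train)[:, 1] >= t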
3 tests/components/__init__.py Normal file
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
0 tests/components/steps/__init__.py Normal file
123 tests/components/steps/test_convert_tight.py Normal file
@@ -0,0 +1,123 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

from miplearn.classifiers import Classifier
from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
from miplearn.instance import Instance
from miplearn.problems.knapsack import GurobiKnapsackInstance
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.learning import LearningSolver


def test_convert_tight_usage():
    instance = GurobiKnapsackInstance(
        weights=[3.0, 5.0, 10.0],
        prices=[1.0, 1.0, 1.0],
        capacity=16.0,
    )
    solver = LearningSolver(
        solver=GurobiSolver,
        components=[
            RelaxIntegralityStep(),
            ConvertTightIneqsIntoEqsStep(),
        ],
    )

    # Solve original problem
    stats = solver.solve(instance)
    original_upper_bound = stats["Upper bound"]

    # Should collect training data
    assert instance.training_data[0]["slacks"]["eq_capacity"] == 0.0

    # Fit and resolve
    solver.fit([instance])
    stats = solver.solve(instance)

    # Objective value should be the same
    assert stats["Upper bound"] == original_upper_bound
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 0


class SampleInstance(Instance):
    def to_model(self):
        import gurobipy as grb

        m = grb.Model("model")
        x1 = m.addVar(name="x1")
        x2 = m.addVar(name="x2")
        m.setObjective(x1 + 2 * x2, grb.GRB.MAXIMIZE)
        m.addConstr(x1 <= 2, name="c1")
        m.addConstr(x2 <= 2, name="c2")
        m.addConstr(x1 + x2 <= 3, name="c3")
        return m


def test_convert_tight_infeasibility():
    comp = ConvertTightIneqsIntoEqsStep()
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[1, 0]])

    solver = LearningSolver(
        solver=GurobiSolver,
        components=[comp],
        solve_lp_first=False,
    )
    instance = SampleInstance()
    stats = solver.solve(instance)
    assert stats["Upper bound"] == 5.0
    assert stats["ConvertTight: Inf iterations"] == 1
    assert stats["ConvertTight: Subopt iterations"] == 0


def test_convert_tight_suboptimality():
    comp = ConvertTightIneqsIntoEqsStep(check_optimality=True)
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[1, 0]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])

    solver = LearningSolver(
        solver=GurobiSolver,
        components=[comp],
        solve_lp_first=False,
    )
    instance = SampleInstance()
    stats = solver.solve(instance)
    assert stats["Upper bound"] == 5.0
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 1


def test_convert_tight_optimal():
    comp = ConvertTightIneqsIntoEqsStep()
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[1, 0]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])

    solver = LearningSolver(
        solver=GurobiSolver,
        components=[comp],
        solve_lp_first=False,
    )
    instance = SampleInstance()
    stats = solver.solve(instance)
    assert stats["Upper bound"] == 5.0
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 0
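For reference: the linear program defined by SampleInstance attains its optimum at (x1, x2) = (1, 2), with objective value 1 + 2 * 2 = 5, which is why all three tests above expect stats["Upper bound"] == 5.0. At that point c2 and c3 are tight while c1 has slack 1; in test_convert_tight_optimal the classifier mocks mark exactly c2 and c3 for conversion, consistent with those tight constraints.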
364 tests/components/steps/test_drop_redundant.py Normal file
@@ -0,0 +1,364 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock, call

import numpy as np

from miplearn.classifiers import Classifier
from miplearn.components.relaxation import DropRedundantInequalitiesStep
from miplearn.instance import Instance
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver


def _setup():
    solver = Mock(spec=LearningSolver)

    internal = solver.internal_solver = Mock(spec=InternalSolver)
    internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
    internal.get_inequality_slacks = Mock(
        side_effect=lambda: {
            "c1": 0.5,
            "c2": 0.0,
            "c3": 0.0,
            "c4": 1.4,
        }
    )
    internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
    internal.is_constraint_satisfied = Mock(return_value=False)

    instance = Mock(spec=Instance)
    instance.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": np.array([1.0, 0.0]),
            "c3": np.array([0.5, 0.5]),
            "c4": np.array([1.0]),
        }[cid]
    )
    instance.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )

    classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    classifiers["type-a"].predict_proba = Mock(
        return_value=np.array(
            [
                [0.20, 0.80],
                [0.05, 0.95],
            ]
        )
    )
    classifiers["type-b"].predict_proba = Mock(
        return_value=np.array(
            [
                [0.02, 0.98],
            ]
        )
    )

    return solver, internal, instance, classifiers


def test_drop_redundant():
    solver, internal, instance, classifiers = _setup()

    component = DropRedundantInequalitiesStep()
    component.classifiers = classifiers

    # LearningSolver calls before_solve
    component.before_solve(solver, instance, None)

    # Should query list of constraints
    internal.get_constraint_ids.assert_called_once()

    # Should query category and features for each constraint in the model
    assert instance.get_constraint_category.call_count == 4
    instance.get_constraint_category.assert_has_calls(
        [
            call("c1"),
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # For constraints with non-null categories, should ask for features
    assert instance.get_constraint_features.call_count == 3
    instance.get_constraint_features.assert_has_calls(
        [
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # Should ask ML to predict whether each constraint should be removed
    type_a_actual = component.classifiers["type-a"].predict_proba.call_args[0][0]
    type_b_actual = component.classifiers["type-b"].predict_proba.call_args[0][0]
    np.testing.assert_array_equal(type_a_actual, np.array([[1.0, 0.0], [0.5, 0.5]]))
    np.testing.assert_array_equal(type_b_actual, np.array([[1.0]]))

    # Should ask internal solver to remove constraints predicted as redundant
    assert internal.extract_constraint.call_count == 2
    internal.extract_constraint.assert_has_calls(
        [
            call("c3"),
            call("c4"),
        ]
    )

    # LearningSolver calls after_solve
    training_data = {}
    component.after_solve(solver, instance, None, {}, training_data)

    # Should query slack for all inequalities
    internal.get_inequality_slacks.assert_called_once()

    # Should store constraint slacks in instance object
    assert training_data["slacks"] == {
        "c1": 0.5,
        "c2": 0.0,
        "c3": 0.0,
        "c4": 1.4,
    }


def test_drop_redundant_with_check_feasibility():
    solver, internal, instance, classifiers = _setup()

    component = DropRedundantInequalitiesStep(
        check_feasibility=True,
        violation_tolerance=1e-3,
    )
    component.classifiers = classifiers

    # LearningSolver calls before_solve
    component.before_solve(solver, instance, None)

    # Assert constraints are extracted
    assert internal.extract_constraint.call_count == 2
    internal.extract_constraint.assert_has_calls(
        [
            call("c3"),
            call("c4"),
        ]
    )

    # LearningSolver calls iteration_cb (first time)
    should_repeat = component.iteration_cb(solver, instance, None)

    # Should ask LearningSolver to repeat
    assert should_repeat

    # Should ask solver if removed constraints are satisfied (the mock always
    # returns False)
    internal.is_constraint_satisfied.assert_has_calls(
        [
            call("<c3>", 1e-3),
            call("<c4>", 1e-3),
        ]
    )

    # Should add constraints back to LP relaxation
    internal.add_constraint.assert_has_calls([call("<c3>"), call("<c4>")])

    # LearningSolver calls iteration_cb (second time)
    should_repeat = component.iteration_cb(solver, instance, None)
    assert not should_repeat


def test_x_y_fit_predict_evaluate():
    instances = [Mock(spec=Instance), Mock(spec=Instance)]
    component = DropRedundantInequalitiesStep(slack_tolerance=0.05, threshold=0.80)
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    component.classifiers["type-a"].predict_proba = Mock(
        return_value=[
            np.array([0.20, 0.80]),
        ]
    )
    component.classifiers["type-b"].predict_proba = Mock(
        return_value=np.array(
            [
                [0.50, 0.50],
                [0.05, 0.95],
            ]
        )
    )

    # First mock instance
    instances[0].training_data = [
        {
            "slacks": {
                "c1": 0.00,
                "c2": 0.05,
                "c3": 0.00,
                "c4": 30.0,
            }
        }
    ]
    instances[0].get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )
    instances[0].get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": np.array([1.0, 0.0]),
            "c3": np.array([0.5, 0.5]),
            "c4": np.array([1.0]),
        }[cid]
    )

    # Second mock instance
    instances[1].training_data = [
        {
            "slacks": {
                "c1": 0.00,
                "c3": 0.30,
                "c4": 0.00,
                "c5": 0.00,
            }
        }
    ]
    instances[1].get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instances[1].get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c3": np.array([0.3, 0.4]),
            "c4": np.array([0.7]),
            "c5": np.array([0.8]),
        }[cid]
    )

    expected_x = {
        "type-a": np.array(
            [
                [1.0, 0.0],
                [0.5, 0.5],
                [0.3, 0.4],
            ]
        ),
        "type-b": np.array(
            [
                [1.0],
                [0.7],
                [0.8],
            ]
        ),
    }
    expected_y = {
        "type-a": np.array([[0], [0], [1]]),
        "type-b": np.array([[1], [0], [0]]),
    }

    # Should build X and Y matrices correctly
    actual_x = component.x(instances)
    actual_y = component.y(instances)
    for category in ["type-a", "type-b"]:
        np.testing.assert_array_equal(actual_x[category], expected_x[category])
        np.testing.assert_array_equal(actual_y[category], expected_y[category])

    # Should pass along X and Y matrices to classifiers
    component.fit(instances)
    for category in ["type-a", "type-b"]:
        actual_x = component.classifiers[category].fit.call_args[0][0]
        actual_y = component.classifiers[category].fit.call_args[0][1]
        np.testing.assert_array_equal(actual_x, expected_x[category])
        np.testing.assert_array_equal(actual_y, expected_y[category])

    assert component.predict(expected_x) == {"type-a": [[1]], "type-b": [[0], [1]]}

    ev = component.evaluate(instances[1])
    assert ev["True positive"] == 1
    assert ev["True negative"] == 1
    assert ev["False positive"] == 1
    assert ev["False negative"] == 0


def test_x_multiple_solves():
    instance = Mock(spec=Instance)
    instance.training_data = [
        {
            "slacks": {
                "c1": 0.00,
                "c2": 0.05,
                "c3": 0.00,
                "c4": 30.0,
            }
        },
        {
            "slacks": {
                "c1": 0.00,
                "c2": 0.00,
                "c3": 1.00,
                "c4": 0.0,
            }
        },
    ]
    instance.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": None,
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )
    instance.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": np.array([1.0, 0.0]),
            "c3": np.array([0.5, 0.5]),
            "c4": np.array([1.0]),
        }[cid]
    )

    expected_x = {
        "type-a": np.array(
            [
                [1.0, 0.0],
                [0.5, 0.5],
                [1.0, 0.0],
                [0.5, 0.5],
            ]
        ),
        "type-b": np.array(
            [
                [1.0],
                [1.0],
            ]
        ),
    }

    expected_y = {
        "type-a": np.array([[1], [0], [0], [1]]),
        "type-b": np.array([[1], [0]]),
    }

    # Should build X and Y matrices correctly
    component = DropRedundantInequalitiesStep()
    actual_x = component.x([instance])
    actual_y = component.y([instance])
    for category in ["type-a", "type-b"]:
        np.testing.assert_array_equal(actual_x[category], expected_x[category])
        np.testing.assert_array_equal(actual_y[category], expected_y[category])
57 tests/components/test_composite.py Normal file
@@ -0,0 +1,57 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock, call

from miplearn.components.component import Component
from miplearn.components.composite import CompositeComponent
from miplearn.instance import Instance
from miplearn.solvers.learning import LearningSolver


def test_composite():
    solver, instance, model = (
        Mock(spec=LearningSolver),
        Mock(spec=Instance),
        Mock(),
    )

    c1 = Mock(spec=Component)
    c2 = Mock(spec=Component)
    cc = CompositeComponent([c1, c2])

    # Should broadcast before_solve
    cc.before_solve(solver, instance, model)
    c1.before_solve.assert_has_calls([call(solver, instance, model)])
    c2.before_solve.assert_has_calls([call(solver, instance, model)])

    # Should broadcast after_solve
    cc.after_solve(solver, instance, model, {}, {})
    c1.after_solve.assert_has_calls([call(solver, instance, model, {}, {})])
    c2.after_solve.assert_has_calls([call(solver, instance, model, {}, {})])

    # Should broadcast fit
    cc.fit([1, 2, 3])
    c1.fit.assert_has_calls([call([1, 2, 3])])
    c2.fit.assert_has_calls([call([1, 2, 3])])

    # Should broadcast lazy_cb
    cc.lazy_cb(solver, instance, model)
    c1.lazy_cb.assert_has_calls([call(solver, instance, model)])
    c2.lazy_cb.assert_has_calls([call(solver, instance, model)])

    # Should broadcast iteration_cb
    cc.iteration_cb(solver, instance, model)
    c1.iteration_cb.assert_has_calls([call(solver, instance, model)])
    c2.iteration_cb.assert_has_calls([call(solver, instance, model)])

    # If at least one child component returns True, iteration_cb should
    # return True
    c1.iteration_cb = Mock(return_value=True)
    c2.iteration_cb = Mock(return_value=False)
    assert cc.iteration_cb(solver, instance, model)

    # If all children return False, iteration_cb should return False
    c1.iteration_cb = Mock(return_value=False)
    c2.iteration_cb = Mock(return_value=False)
    assert not cc.iteration_cb(solver, instance, model)
143 tests/components/test_lazy_dynamic.py Normal file
@@ -0,0 +1,143 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from numpy.linalg import norm

from miplearn.classifiers import Classifier
from miplearn.components.lazy_dynamic import DynamicLazyConstraintsComponent
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver
from .. import get_test_pyomo_instances

E = 0.1


def test_lazy_fit():
    instances, models = get_test_pyomo_instances()
    instances[0].found_violated_lazy_constraints = ["a", "b"]
    instances[1].found_violated_lazy_constraints = ["b", "c"]
    classifier = Mock(spec=Classifier)
    component = DynamicLazyConstraintsComponent(classifier=classifier)

    component.fit(instances)

    # Should create one classifier for each violation
    assert "a" in component.classifiers
    assert "b" in component.classifiers
    assert "c" in component.classifiers

    # Should provide correct x_train to each classifier
    expected_x_train_a = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
    expected_x_train_b = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
    expected_x_train_c = np.array([[67.0, 21.75, 1287.92], [70.0, 23.75, 1199.83]])
    actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
    actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
    actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
    assert norm(expected_x_train_a - actual_x_train_a) < E
    assert norm(expected_x_train_b - actual_x_train_b) < E
    assert norm(expected_x_train_c - actual_x_train_c) < E

    # Should provide correct y_train to each classifier
    expected_y_train_a = np.array([1.0, 0.0])
    expected_y_train_b = np.array([1.0, 1.0])
    expected_y_train_c = np.array([0.0, 1.0])
    actual_y_train_a = component.classifiers["a"].fit.call_args[0][1]
    actual_y_train_b = component.classifiers["b"].fit.call_args[0][1]
    actual_y_train_c = component.classifiers["c"].fit.call_args[0][1]
    assert norm(expected_y_train_a - actual_y_train_a) < E
    assert norm(expected_y_train_b - actual_y_train_b) < E
    assert norm(expected_y_train_c - actual_y_train_c) < E


def test_lazy_before():
    instances, models = get_test_pyomo_instances()
    instances[0].build_lazy_constraint = Mock(return_value="c1")
    solver = LearningSolver()
    solver.internal_solver = Mock(spec=InternalSolver)
    component = DynamicLazyConstraintsComponent(threshold=0.10)
    component.classifiers = {"a": Mock(spec=Classifier), "b": Mock(spec=Classifier)}
    component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])

    component.before_solve(solver, instances[0], models[0])

    # Should ask each classifier for the likelihood of its constraint being
    # violated
    expected_x_test_a = np.array([[67.0, 21.75, 1287.92]])
    expected_x_test_b = np.array([[67.0, 21.75, 1287.92]])
    actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
    actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
    assert norm(expected_x_test_a - actual_x_test_a) < E
    assert norm(expected_x_test_b - actual_x_test_b) < E

    # Should ask the instance to generate a cut for each constraint whose
    # likelihood of being violated exceeds the threshold
    instances[0].build_lazy_constraint.assert_called_once_with(models[0], "b")

    # Should ask internal solver to add the generated constraint
    solver.internal_solver.add_constraint.assert_called_once_with("c1")


def test_lazy_evaluate():
    instances, models = get_test_pyomo_instances()
    component = DynamicLazyConstraintsComponent()
    component.classifiers = {
        "a": Mock(spec=Classifier),
        "b": Mock(spec=Classifier),
        "c": Mock(spec=Classifier),
    }
    component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
    component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])

    instances[0].found_violated_lazy_constraints = ["a", "b", "c"]
    instances[1].found_violated_lazy_constraints = ["b", "d"]
    # Instance 0: violated = {a, b, c}, predicted = {b, c}, so TP = 2,
    # FP = 0, FN = 1 (a) and TN = 1 (d). Instance 1: violated = {b, d},
    # predicted = {b, c}, so TP = 1, FP = 1 (c), FN = 1 (d) and TN = 1 (a).
    assert component.evaluate(instances) == {
        0: {
            "Accuracy": 0.75,
            "F1 score": 0.8,
            "Precision": 1.0,
            "Recall": 2 / 3.0,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 3,
            "Condition negative": 1,
            "False negative": 1,
            "False positive": 0,
            "True negative": 1,
            "True positive": 2,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 75.0,
            "Condition negative (%)": 25.0,
            "False negative (%)": 25.0,
            "False positive (%)": 0,
            "True negative (%)": 25.0,
            "True positive (%)": 50.0,
        },
        1: {
            "Accuracy": 0.5,
            "F1 score": 0.5,
            "Precision": 0.5,
            "Recall": 0.5,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 2,
            "Condition negative": 2,
            "False negative": 1,
            "False positive": 1,
            "True negative": 1,
            "True positive": 1,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 50.0,
            "Condition negative (%)": 50.0,
            "False negative (%)": 25.0,
            "False positive (%)": 25.0,
            "True negative (%)": 25.0,
            "True positive (%)": 25.0,
        },
    }
232 tests/components/test_lazy_static.py Normal file
@@ -0,0 +1,232 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock, call

from miplearn.classifiers import Classifier
from miplearn.components.lazy_static import StaticLazyConstraintsComponent
from miplearn.instance import Instance
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver


def test_usage_with_solver():
    solver = Mock(spec=LearningSolver)
    solver.use_lazy_cb = False
    solver.gap_tolerance = 1e-4

    internal = solver.internal_solver = Mock(spec=InternalSolver)
    internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
    internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
    internal.is_constraint_satisfied = Mock(return_value=False)

    instance = Mock(spec=Instance)
    instance.has_static_lazy_constraints = Mock(return_value=True)
    instance.is_constraint_lazy = Mock(
        side_effect=lambda cid: {
            "c1": False,
            "c2": True,
            "c3": True,
            "c4": True,
        }[cid]
    )
    instance.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c2": [1.0, 0.0],
            "c3": [0.5, 0.5],
            "c4": [1.0],
        }[cid]
    )
    instance.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
        }[cid]
    )

    component = StaticLazyConstraintsComponent(
        threshold=0.90,
        use_two_phase_gap=False,
        violation_tolerance=1.0,
    )
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }
    component.classifiers["type-a"].predict_proba = Mock(
        return_value=[
            [0.20, 0.80],
            [0.05, 0.95],
        ]
    )
    component.classifiers["type-b"].predict_proba = Mock(
        return_value=[
            [0.02, 0.98],
        ]
    )

    # LearningSolver calls before_solve
    component.before_solve(solver, instance, None)

    # Should ask if instance has static lazy constraints
    instance.has_static_lazy_constraints.assert_called_once()

    # Should ask internal solver for a list of constraints in the model
    internal.get_constraint_ids.assert_called_once()

    # Should ask if each constraint in the model is lazy
    instance.is_constraint_lazy.assert_has_calls(
        [
            call("c1"),
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # For the lazy ones, should ask for features
    instance.get_constraint_features.assert_has_calls(
        [
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # Should also ask for categories
    assert instance.get_constraint_category.call_count == 3
    instance.get_constraint_category.assert_has_calls(
        [
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # Should ask internal solver to remove constraints identified as lazy
    assert internal.extract_constraint.call_count == 3
    internal.extract_constraint.assert_has_calls(
        [
            call("c2"),
            call("c3"),
            call("c4"),
        ]
    )

    # Should ask ML to predict whether each lazy constraint should be enforced
    component.classifiers["type-a"].predict_proba.assert_called_once_with(
        [[1.0, 0.0], [0.5, 0.5]]
    )
    component.classifiers["type-b"].predict_proba.assert_called_once_with([[1.0]])

    # For the ones that should be enforced, should ask solver to re-add them
    # to the formulation. The rest should stay in the pool.
    assert internal.add_constraint.call_count == 2
    internal.add_constraint.assert_has_calls(
        [
            call("<c3>"),
            call("<c4>"),
        ]
    )
    internal.add_constraint.reset_mock()

    # LearningSolver calls after_iteration (first time)
    should_repeat = component.iteration_cb(solver, instance, None)
    assert should_repeat

    # Should ask internal solver to verify whether constraints in the pool
    # are satisfied, and add the ones that are not
    internal.is_constraint_satisfied.assert_called_once_with("<c2>", tol=1.0)
    internal.is_constraint_satisfied.reset_mock()
    internal.add_constraint.assert_called_once_with("<c2>")
    internal.add_constraint.reset_mock()

    # LearningSolver calls after_iteration (second time)
    should_repeat = component.iteration_cb(solver, instance, None)
    assert not should_repeat

    # The lazy constraint pool should be empty by now, so no calls should be
    # made
    internal.is_constraint_satisfied.assert_not_called()
    internal.add_constraint.assert_not_called()

    # Should update instance object
    assert instance.found_violated_lazy_constraints == ["c3", "c4", "c2"]


def test_fit():
    instance_1 = Mock(spec=Instance)
    instance_1.found_violated_lazy_constraints = ["c1", "c2", "c4", "c5"]
    instance_1.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": "type-a",
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instance_1.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c1": [1, 1],
            "c2": [1, 2],
            "c3": [1, 3],
            "c4": [1, 4, 0],
            "c5": [1, 5, 0],
        }[cid]
    )

    instance_2 = Mock(spec=Instance)
    instance_2.found_violated_lazy_constraints = ["c2", "c3", "c4"]
    instance_2.get_constraint_category = Mock(
        side_effect=lambda cid: {
            "c1": "type-a",
            "c2": "type-a",
            "c3": "type-a",
            "c4": "type-b",
            "c5": "type-b",
        }[cid]
    )
    instance_2.get_constraint_features = Mock(
        side_effect=lambda cid: {
            "c1": [2, 1],
            "c2": [2, 2],
            "c3": [2, 3],
            "c4": [2, 4, 0],
            "c5": [2, 5, 0],
        }[cid]
    )

    instances = [instance_1, instance_2]
    component = StaticLazyConstraintsComponent()
    component.classifiers = {
        "type-a": Mock(spec=Classifier),
        "type-b": Mock(spec=Classifier),
    }

    expected_constraints = {
        "type-a": ["c1", "c2", "c3"],
        "type-b": ["c4", "c5"],
    }
    expected_x = {
        "type-a": [[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]],
        "type-b": [[1, 4, 0], [1, 5, 0], [2, 4, 0], [2, 5, 0]],
    }
    expected_y = {
        "type-a": [[0, 1], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
        "type-b": [[0, 1], [0, 1], [0, 1], [1, 0]],
    }
    assert component._collect_constraints(instances) == expected_constraints
    assert component.x(instances) == expected_x
    assert component.y(instances) == expected_y

    component.fit(instances)
    component.classifiers["type-a"].fit.assert_called_once_with(
        expected_x["type-a"],
        expected_y["type-a"],
    )
    component.classifiers["type-b"].fit.assert_called_once_with(
        expected_x["type-b"],
        expected_y["type-b"],
    )
50 tests/components/test_objective.py Normal file
@@ -0,0 +1,50 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np

from miplearn.classifiers import Regressor
from miplearn.components.objective import ObjectiveValueComponent
from .. import get_test_pyomo_instances


def test_usage():
    instances, models = get_test_pyomo_instances()
    comp = ObjectiveValueComponent()
    comp.fit(instances)
    assert instances[0].training_data[0]["Lower bound"] == 1183.0
    assert instances[0].training_data[0]["Upper bound"] == 1183.0
    assert np.round(comp.predict(instances), 2).tolist() == [
        [1183.0, 1183.0],
        [1070.0, 1070.0],
    ]


def test_obj_evaluate():
    instances, models = get_test_pyomo_instances()
    reg = Mock(spec=Regressor)
    reg.predict = Mock(return_value=np.array([1000.0, 1000.0]))
    comp = ObjectiveValueComponent(regressor=reg)
    comp.fit(instances)
    ev = comp.evaluate(instances)
    assert ev == {
        "Lower bound": {
            "Explained variance": 0.0,
            "Max error": 183.0,
            "Mean absolute error": 126.5,
            "Mean squared error": 19194.5,
            "Median absolute error": 126.5,
            "R2": -5.012843605607331,
        },
        "Upper bound": {
            "Explained variance": 0.0,
            "Max error": 183.0,
            "Mean absolute error": 126.5,
            "Mean squared error": 19194.5,
            "Median absolute error": 126.5,
            "R2": -5.012843605607331,
        },
    }
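For reference, the R2 entries can be checked by hand: with targets [1183.0, 1070.0] and the mocked constant prediction 1000.0, SS_res = 183^2 + 70^2 = 38389, the target mean is 1126.5, and SS_tot = 2 * 56.5^2 = 6384.5, so R2 = 1 - 38389 / 6384.5 ≈ -5.0128, matching the asserted value.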
111 tests/components/test_primal.py Normal file
@@ -0,0 +1,111 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np

from miplearn.classifiers import Classifier
from miplearn.components.primal import PrimalSolutionComponent
from .. import get_test_pyomo_instances


def test_predict():
    instances, models = get_test_pyomo_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances)
    solution = comp.predict(instances[0])
    assert "x" in solution
    assert 0 in solution["x"]
    assert 1 in solution["x"]
    assert 2 in solution["x"]
    assert 3 in solution["x"]


def test_evaluate():
    instances, models = get_test_pyomo_instances()
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(
        return_value=np.array(
            [
                [0.0, 1.0],  # x[0]
                [0.0, 1.0],  # x[1]
                [1.0, 0.0],  # x[2]
                [1.0, 0.0],  # x[3]
            ]
        )
    )
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(
        return_value=np.array(
            [
                [1.0, 0.0],  # x[0] instances[0]
                [1.0, 0.0],  # x[1] instances[0]
                [0.0, 1.0],  # x[2] instances[0]
                [1.0, 0.0],  # x[3] instances[0]
            ]
        )
    )
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one], threshold=0.50)
    comp.fit(instances[:1])
    assert comp.predict(instances[0]) == {"x": {0: 0, 1: 0, 2: 1, 3: None}}
    assert instances[0].training_data[0]["Solution"] == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
    ev = comp.evaluate(instances[:1])
    assert ev == {
        "Fix one": {
            0: {
                "Accuracy": 0.5,
                "Condition negative": 1,
                "Condition negative (%)": 25.0,
                "Condition positive": 3,
                "Condition positive (%)": 75.0,
                "F1 score": 0.5,
                "False negative": 2,
                "False negative (%)": 50.0,
                "False positive": 0,
                "False positive (%)": 0.0,
                "Precision": 1.0,
                "Predicted negative": 3,
                "Predicted negative (%)": 75.0,
                "Predicted positive": 1,
                "Predicted positive (%)": 25.0,
                "Recall": 0.3333333333333333,
                "True negative": 1,
                "True negative (%)": 25.0,
                "True positive": 1,
                "True positive (%)": 25.0,
            }
        },
        "Fix zero": {
            0: {
                "Accuracy": 0.75,
                "Condition negative": 3,
                "Condition negative (%)": 75.0,
                "Condition positive": 1,
                "Condition positive (%)": 25.0,
                "F1 score": 0.6666666666666666,
                "False negative": 0,
                "False negative (%)": 0.0,
                "False positive": 1,
                "False positive (%)": 25.0,
                "Precision": 0.5,
                "Predicted negative": 2,
                "Predicted negative (%)": 50.0,
                "Predicted positive": 2,
                "Predicted positive (%)": 50.0,
                "Recall": 1.0,
                "True negative": 2,
                "True negative (%)": 50.0,
                "True positive": 1,
                "True positive (%)": 25.0,
            }
        },
    }


def test_primal_parallel_fit():
    instances, models = get_test_pyomo_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances, n_jobs=2)
    assert len(comp.classifiers) == 2
3 tests/problems/__init__.py Normal file
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
24 tests/problems/test_knapsack.py Normal file
@@ -0,0 +1,24 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from scipy.stats import uniform, randint

from miplearn.problems.knapsack import MultiKnapsackGenerator


def test_knapsack_generator():
    gen = MultiKnapsackGenerator(
        n=randint(low=100, high=101),
        m=randint(low=30, high=31),
        w=randint(low=0, high=1000),
        K=randint(low=500, high=501),
        u=uniform(loc=1.0, scale=1.0),
        alpha=uniform(loc=0.50, scale=0.0),
    )
    instances = gen.generate(100)
    w_sum = sum(instance.weights for instance in instances) / len(instances)
    b_sum = sum(instance.capacities for instance in instances) / len(instances)
    # The averages below follow if weights are drawn uniformly from
    # {0, ..., 999} (mean roughly 500) and each capacity is set to roughly
    # alpha times the total item weight: 0.5 * 100 * 500 = 25,000.
    assert round(np.mean(w_sum), -1) == 500.0
    assert round(np.mean(b_sum), -3) == 25000.0
53 tests/problems/test_stab.py Normal file
@@ -0,0 +1,53 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import networkx as nx
import numpy as np
from scipy.stats import uniform, randint

from miplearn.problems.stab import MaxWeightStableSetInstance
from miplearn.solvers.learning import LearningSolver


def test_stab():
    graph = nx.cycle_graph(5)
    weights = [1.0, 1.0, 1.0, 1.0, 1.0]
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    stats = solver.solve(instance)
    # A 5-cycle has a maximum stable set of size 2, so with unit weights the
    # optimal value is 2.0
    assert stats["Lower bound"] == 2.0


def test_stab_generator_fixed_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator

    gen = MaxWeightStableSetGenerator(
        w=uniform(loc=50.0, scale=10.0),
        n=randint(low=10, high=11),
        p=uniform(loc=0.05, scale=0.0),
        fix_graph=True,
    )
    instances = gen.generate(1_000)
    weights = np.array([instance.weights for instance in instances])
    weights_avg_actual = np.round(np.average(weights, axis=0))
    weights_avg_expected = [55.0] * 10
    assert list(weights_avg_actual) == weights_avg_expected


def test_stab_generator_random_graph():
    np.random.seed(42)
    from miplearn.problems.stab import MaxWeightStableSetGenerator

    gen = MaxWeightStableSetGenerator(
        w=uniform(loc=50.0, scale=10.0),
        n=randint(low=30, high=41),
        p=uniform(loc=0.5, scale=0.0),
        fix_graph=False,
    )
    instances = gen.generate(1_000)
    n_nodes = [instance.graph.number_of_nodes() for instance in instances]
    n_edges = [instance.graph.number_of_edges() for instance in instances]
    assert np.round(np.mean(n_nodes)) == 35.0
    assert np.round(np.mean(n_edges), -1) == 300.0
79 tests/problems/test_tsp.py Normal file
@@ -0,0 +1,79 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint

from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance
from miplearn.solvers.learning import LearningSolver


def test_generator():
    instances = TravelingSalesmanGenerator(
        x=uniform(loc=0.0, scale=1000.0),
        y=uniform(loc=0.0, scale=1000.0),
        n=randint(low=100, high=101),
        gamma=uniform(loc=0.95, scale=0.1),
        fix_cities=True,
    ).generate(100)
    assert len(instances) == 100
    assert instances[0].n_cities == 100
    # Distance matrices should be symmetric
    assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
    d = [instance.distances[0, 1] for instance in instances]
    assert np.std(d) > 0


def test_instance():
    n_cities = 4
    distances = np.array(
        [
            [0.0, 1.0, 2.0, 1.0],
            [1.0, 0.0, 1.0, 2.0],
            [2.0, 1.0, 0.0, 1.0],
            [1.0, 2.0, 1.0, 0.0],
        ]
    )
    instance = TravelingSalesmanInstance(n_cities, distances)
    solver = LearningSolver()
    stats = solver.solve(instance)
    x = instance.training_data[0]["Solution"]["x"]
    assert x[0, 1] == 1.0
    assert x[0, 2] == 0.0
    assert x[0, 3] == 1.0
    assert x[1, 2] == 1.0
    assert x[1, 3] == 0.0
    assert x[2, 3] == 1.0
    assert stats["Lower bound"] == 4.0
    assert stats["Upper bound"] == 4.0


def test_subtour():
    n_cities = 6
    cities = np.array(
        [
            [0.0, 0.0],
            [1.0, 0.0],
            [2.0, 0.0],
            [3.0, 0.0],
            [0.0, 1.0],
            [3.0, 1.0],
        ]
    )
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    solver = LearningSolver()
    solver.solve(instance)
    assert hasattr(instance, "found_violated_lazy_constraints")
    assert hasattr(instance, "found_violated_user_cuts")
    x = instance.training_data[0]["Solution"]["x"]
    assert x[0, 1] == 1.0
    assert x[0, 4] == 1.0
    assert x[1, 2] == 1.0
    assert x[2, 3] == 1.0
    assert x[3, 5] == 1.0
    assert x[4, 5] == 1.0
    solver.fit([instance])
    solver.solve(instance)
70 tests/solvers/__init__.py Normal file
@@ -0,0 +1,70 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from inspect import isclass
from typing import List, Callable, Any

from pyomo import environ as pe

from miplearn.instance import Instance
from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
from miplearn.solvers.pyomo.xpress import XpressPyomoSolver


class InfeasiblePyomoInstance(Instance):
    def to_model(self) -> pe.ConcreteModel:
        model = pe.ConcreteModel()
        model.x = pe.Var([0], domain=pe.Binary)
        model.OBJ = pe.Objective(expr=model.x[0], sense=pe.maximize)
        model.eq = pe.Constraint(expr=model.x[0] >= 2)
        return model


class InfeasibleGurobiInstance(Instance):
    def to_model(self) -> Any:
        import gurobipy as gp
        from gurobipy import GRB

        model = gp.Model()
        x = model.addVars(1, vtype=GRB.BINARY, name="x")
        model.addConstr(x[0] >= 2)
        model.setObjective(x[0])
        return model


def _is_subclass_or_instance(obj, parent_class):
    return isinstance(obj, parent_class) or (
        isclass(obj) and issubclass(obj, parent_class)
    )


def _get_knapsack_instance(solver):
    if _is_subclass_or_instance(solver, BasePyomoSolver):
        return KnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )
    if _is_subclass_or_instance(solver, GurobiSolver):
        return GurobiKnapsackInstance(
            weights=[23.0, 26.0, 20.0, 18.0],
            prices=[505.0, 352.0, 458.0, 220.0],
            capacity=67.0,
        )
    assert False


def _get_infeasible_instance(solver):
    if _is_subclass_or_instance(solver, BasePyomoSolver):
        return InfeasiblePyomoInstance()
    if _is_subclass_or_instance(solver, GurobiSolver):
        return InfeasibleGurobiInstance()


def _get_internal_solvers() -> List[Callable[[], InternalSolver]]:
    return [GurobiPyomoSolver, GurobiSolver, XpressPyomoSolver]
219
tests/solvers/test_internal_solver.py
Normal file
219
tests/solvers/test_internal_solver.py
Normal file
@@ -0,0 +1,219 @@
|
||||
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
|
||||
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
|
||||
# Released under the modified BSD license. See COPYING.md for more details.
|
||||
|
||||
import logging
|
||||
from io import StringIO
|
||||
from warnings import warn
|
||||
|
||||
import pyomo.environ as pe
|
||||
|
||||
from miplearn.solvers import _RedirectOutput
|
||||
from miplearn.solvers.gurobi import GurobiSolver
|
||||
from miplearn.solvers.pyomo.base import BasePyomoSolver
|
||||
from . import (
|
||||
_get_knapsack_instance,
|
||||
_get_internal_solvers,
|
||||
_get_infeasible_instance,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def test_redirect_output():
|
||||
import sys
|
||||
|
||||
original_stdout = sys.stdout
|
||||
io = StringIO()
|
||||
with _RedirectOutput([io]):
|
||||
print("Hello world")
|
||||
assert sys.stdout == original_stdout
|
||||
assert io.getvalue() == "Hello world\n"
|
||||
|
||||
|
||||
def test_internal_solver_warm_starts():
|
||||
for solver_class in _get_internal_solvers():
|
||||
logger.info("Solver: %s" % solver_class)
|
||||
instance = _get_knapsack_instance(solver_class)
|
||||
model = instance.to_model()
|
||||
solver = solver_class()
|
||||
solver.set_instance(instance, model)
|
||||
solver.set_warm_start(
|
||||
{
|
||||
"x": {
|
||||
0: 1.0,
|
||||
1: 0.0,
|
||||
2: 0.0,
|
||||
3: 1.0,
|
||||
}
|
||||
}
|
||||
)
|
||||
stats = solver.solve(tee=True)
|
||||
if stats["Warm start value"] is not None:
|
||||
assert stats["Warm start value"] == 725.0
|
||||
else:
|
||||
warn(f"{solver_class.__name__} should set warm start value")
|
||||
|
||||
solver.set_warm_start(
|
||||
{
|
||||
"x": {
|
||||
0: 1.0,
|
||||
1: 1.0,
|
||||
2: 1.0,
|
||||
3: 1.0,
|
||||
}
|
||||
}
|
||||
)
|
||||
stats = solver.solve(tee=True)
|
||||
assert stats["Warm start value"] is None
|
||||
|
||||
solver.fix(
|
||||
{
|
||||
"x": {
|
||||
0: 1.0,
|
||||
1: 0.0,
|
||||
2: 0.0,
|
||||
3: 1.0,
|
||||
}
|
||||
}
|
||||
)
|
||||
stats = solver.solve(tee=True)
|
||||
assert stats["Lower bound"] == 725.0
|
||||
assert stats["Upper bound"] == 725.0
|
||||
|
||||
|
||||
def test_internal_solver():
|
||||
for solver_class in _get_internal_solvers():
|
||||
logger.info("Solver: %s" % solver_class)
|
||||
|
||||
instance = _get_knapsack_instance(solver_class)
|
||||
model = instance.to_model()
|
||||
solver = solver_class()
|
||||
solver.set_instance(instance, model)
|
||||
|
||||
stats = solver.solve_lp()
|
||||
assert not solver.is_infeasible()
|
||||
assert round(stats["Optimal value"], 3) == 1287.923
|
||||
assert len(stats["Log"]) > 100
|
||||
|
||||
solution = solver.get_solution()
|
||||
assert round(solution["x"][0], 3) == 1.000
|
||||
assert round(solution["x"][1], 3) == 0.923
|
||||
assert round(solution["x"][2], 3) == 1.000
|
||||
assert round(solution["x"][3], 3) == 0.000
|
||||
|
||||
        stats = solver.solve(tee=True)
        assert not solver.is_infeasible()
        assert len(stats["Log"]) > 100
        assert stats["Lower bound"] == 1183.0
        assert stats["Upper bound"] == 1183.0
        assert stats["Sense"] == "max"
        assert isinstance(stats["Wallclock time"], float)

        solution = solver.get_solution()
        assert solution["x"][0] == 1.0
        assert solution["x"][1] == 0.0
        assert solution["x"][2] == 1.0
        assert solution["x"][3] == 1.0

        # Add a brand new constraint
        if isinstance(solver, BasePyomoSolver):
            model.cut = pe.Constraint(expr=model.x[0] <= 0.0, name="cut")
            solver.add_constraint(model.cut)
        elif isinstance(solver, GurobiSolver):
            x = model.getVarByName("x[0]")
            solver.add_constraint(x <= 0.0, name="cut")
        else:
            raise Exception("Illegal state")

        # New constraint should affect the solution and should be listed in
        # the constraint ids
        assert solver.get_constraint_ids() == ["eq_capacity", "cut"]
        stats = solver.solve()
        assert stats["Lower bound"] == 1030.0

        assert solver.get_sense() == "max"
        assert solver.get_constraint_sense("cut") == "<"
        assert solver.get_constraint_sense("eq_capacity") == "<"

        # Verify slacks
        assert solver.get_inequality_slacks() == {
            "cut": 0.0,
            "eq_capacity": 3.0,
        }

        if isinstance(solver, GurobiSolver):
            # Extract the new constraint
            cobj = solver.extract_constraint("cut")

            # New constraint should no longer affect the solution and should
            # no longer be listed in the constraint ids
            assert solver.get_constraint_ids() == ["eq_capacity"]
            stats = solver.solve()
            assert stats["Lower bound"] == 1183.0

            # New constraint should not be satisfied by the current solution
            assert not solver.is_constraint_satisfied(cobj)

            # Re-add the constraint
            solver.add_constraint(cobj)

            # Constraint should affect the solution again
            assert solver.get_constraint_ids() == ["eq_capacity", "cut"]
            stats = solver.solve()
            assert stats["Lower bound"] == 1030.0

            # New constraint should now be satisfied
            assert solver.is_constraint_satisfied(cobj)

            # Relax the problem and make the cut an equality constraint
            solver.relax()
            solver.set_constraint_sense("cut", "=")
            stats = solver.solve()
            assert round(stats["Lower bound"]) == 1030.0
            assert round(solver.get_dual("eq_capacity")) == 0.0

def test_relax():
    for solver_class in _get_internal_solvers():
        instance = _get_knapsack_instance(solver_class)
        solver = solver_class()
        solver.set_instance(instance)
        solver.relax()
        stats = solver.solve()
        assert round(stats["Lower bound"]) == 1288.0

def test_infeasible_instance():
    for solver_class in _get_internal_solvers():
        instance = _get_infeasible_instance(solver_class)
        solver = solver_class()
        solver.set_instance(instance)
        stats = solver.solve()

        assert solver.is_infeasible()
        assert solver.get_solution() is None
        assert stats["Upper bound"] is None
        assert stats["Lower bound"] is None

        stats = solver.solve_lp()
        assert solver.get_solution() is None
        assert stats["Optimal value"] is None
        assert solver.get_value("x", 0) is None

def test_iteration_cb():
    for solver_class in _get_internal_solvers():
        logger.info("Solver: %s" % solver_class)
        instance = _get_knapsack_instance(solver_class)
        solver = solver_class()
        solver.set_instance(instance)
        count = 0

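        # The callback is invoked once per solver iteration; returning False
        # stops the solve loop, so it should run exactly five times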
        def custom_iteration_cb():
            nonlocal count
            count += 1
            return count < 5

        solver.solve(iteration_cb=custom_iteration_cb)
        assert count == 5
27
tests/solvers/test_lazy_cb.py
Normal file
@@ -0,0 +1,27 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging

from miplearn.solvers.gurobi import GurobiSolver
from . import _get_knapsack_instance

logger = logging.getLogger(__name__)


def test_lazy_cb():
    solver = GurobiSolver()
    instance = _get_knapsack_instance(solver)
    model = instance.to_model()

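    # Lazy constraint callback: whenever an incumbent with x[0] = 1 appears,
    # add the cut x[0] <= 0 on the fly; judging by the code below, the
    # constraint is passed around as an (expr, sense, rhs, name) tuple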
    def lazy_cb(cb_solver, cb_model):
        logger.info("x[0] = %.f" % cb_solver.get_value("x", 0))
        cobj = (cb_model.getVarByName("x[0]") * 1.0, "<", 0.0, "cut")
        if not cb_solver.is_constraint_satisfied(cobj):
            cb_solver.add_constraint(cobj)

    solver.set_instance(instance, model)
    solver.solve(lazy_cb=lazy_cb)
    solution = solver.get_solution()
    assert solution["x"][0] == 0.0
142
tests/solvers/test_learning_solver.py
Normal file
@@ -0,0 +1,142 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
import os
import pickle
import tempfile

from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.learning import LearningSolver
from . import _get_knapsack_instance, _get_internal_solvers

logger = logging.getLogger(__name__)

def test_learning_solver():
    for mode in ["exact", "heuristic"]:
        for internal_solver in _get_internal_solvers():
            logger.info("Solver: %s" % internal_solver)
            instance = _get_knapsack_instance(internal_solver)
            solver = LearningSolver(
                solver=internal_solver,
                mode=mode,
            )

            solver.solve(instance)
            data = instance.training_data[0]
            assert data["Solution"]["x"][0] == 1.0
            assert data["Solution"]["x"][1] == 0.0
            assert data["Solution"]["x"][2] == 1.0
            assert data["Solution"]["x"][3] == 1.0
            assert data["Lower bound"] == 1183.0
            assert data["Upper bound"] == 1183.0
            assert round(data["LP solution"]["x"][0], 3) == 1.000
            assert round(data["LP solution"]["x"][1], 3) == 0.923
            assert round(data["LP solution"]["x"][2], 3) == 1.000
            assert round(data["LP solution"]["x"][3], 3) == 0.000
            assert round(data["LP value"], 3) == 1287.923
            assert len(data["MIP log"]) > 100

            solver.fit([instance])
            solver.solve(instance)

            # Assert solver is picklable
            with tempfile.TemporaryFile() as file:
                pickle.dump(solver, file)

def test_solve_without_lp():
    for internal_solver in _get_internal_solvers():
        logger.info("Solver: %s" % internal_solver)
        instance = _get_knapsack_instance(internal_solver)
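        # With solve_lp_first=False, the solver should skip the initial LP
        # relaxation and go straight to the MIP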
        solver = LearningSolver(
            solver=internal_solver,
            solve_lp_first=False,
        )
        solver.solve(instance)
        solver.fit([instance])
        solver.solve(instance)

def test_parallel_solve():
    for internal_solver in _get_internal_solvers():
        instances = [_get_knapsack_instance(internal_solver) for _ in range(10)]
        solver = LearningSolver(solver=internal_solver)
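        # Solve all ten instances in parallel (three at a time); each instance
        # should come back with its own training data attached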
        results = solver.parallel_solve(instances, n_jobs=3)
        assert len(results) == 10
        for instance in instances:
            data = instance.training_data[0]
            assert len(data["Solution"]["x"].keys()) == 4

def test_solve_fit_from_disk():
    for internal_solver in _get_internal_solvers():
        # Create instances and pickle them
        filenames = []
        for k in range(3):
            instance = _get_knapsack_instance(internal_solver)
            with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as file:
                filenames += [file.name]
                pickle.dump(instance, file)

        # Test: solve
        solver = LearningSolver(solver=internal_solver)
        solver.solve(filenames[0])
        with open(filenames[0], "rb") as file:
            instance = pickle.load(file)
            assert len(instance.training_data) > 0

        # Test: parallel_solve
        solver.parallel_solve(filenames)
        for filename in filenames:
            with open(filename, "rb") as file:
                instance = pickle.load(file)
                assert len(instance.training_data) > 0

        # Test: solve (with specified output)
        output = [f + ".out" for f in filenames]
        solver.solve(
            filenames[0],
            output_filename=output[0],
        )
        assert os.path.isfile(output[0])

        # Test: parallel_solve (with specified output)
        solver.parallel_solve(
            filenames,
            output_filenames=output,
        )
        for filename in output:
            assert os.path.isfile(filename)

        # Delete temporary files
        for filename in filenames:
            os.remove(filename)
        for filename in output:
            os.remove(filename)

def test_simulate_perfect():
    internal_solver = GurobiSolver
    instance = _get_knapsack_instance(internal_solver)
    with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as tmp:
        pickle.dump(instance, tmp)
        tmp.flush()
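        # With simulate_perfect=True, the solver pre-solves the instance and
        # trains on it, so the predicted bound should match the actual optimum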
        solver = LearningSolver(
            solver=internal_solver,
            simulate_perfect=True,
        )
        stats = solver.solve(tmp.name)
        assert stats["Lower bound"] == stats["Predicted LB"]

def test_gap():
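    # Relative gap between bounds: zero when they agree, and None whenever a
    # bound is missing or the relative gap is not well defined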
    assert LearningSolver._compute_gap(ub=0.0, lb=0.0) == 0.0
    assert LearningSolver._compute_gap(ub=1.0, lb=0.5) == 0.5
    assert LearningSolver._compute_gap(ub=1.0, lb=1.0) == 0.0
    assert LearningSolver._compute_gap(ub=1.0, lb=-1.0) is None
    assert LearningSolver._compute_gap(ub=1.0, lb=None) is None
    assert LearningSolver._compute_gap(ub=None, lb=1.0) is None
    assert LearningSolver._compute_gap(ub=None, lb=None) is None
35
tests/test_benchmark.py
Normal file
@@ -0,0 +1,35 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import os.path

from scipy.stats import randint

from miplearn.benchmark import BenchmarkRunner
from miplearn.problems.stab import MaxWeightStableSetGenerator
from miplearn.solvers.learning import LearningSolver


def test_benchmark():
    # Generate training and test instances
    generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
    train_instances = generator.generate(5)
    test_instances = generator.generate(3)

    # Training phase...
    training_solver = LearningSolver()
    training_solver.parallel_solve(train_instances, n_jobs=10)

    # Test phase...
    test_solvers = {
        "Strategy A": LearningSolver(),
        "Strategy B": LearningSolver(),
    }
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
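    # 3 test instances x 2 solvers x 2 trials = 12 result rows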
    assert benchmark.results.values.shape == (12, 14)

    benchmark.write_csv("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")
69
tests/test_extractors.py
Normal file
@@ -0,0 +1,69 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np

from miplearn.extractors import (
    SolutionExtractor,
    InstanceFeaturesExtractor,
    VariableFeaturesExtractor,
)
from miplearn.problems.knapsack import KnapsackInstance
from miplearn.solvers.learning import LearningSolver


def _get_instances():
    instances = [
        KnapsackInstance(
            weights=[1.0, 2.0, 3.0],
            prices=[10.0, 20.0, 30.0],
            capacity=2.5,
        ),
        KnapsackInstance(
            weights=[3.0, 4.0, 5.0],
            prices=[20.0, 30.0, 40.0],
            capacity=4.5,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for i, instance in enumerate(instances):
        solver.solve(instance, models[i])
    return instances, models


def test_solution_extractor():
    instances, models = _get_instances()
    features = SolutionExtractor().extract(instances)
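    # One row per variable per instance (2 instances x 3 variables = 6 rows);
    # the two columns appear to one-hot encode each variable's optimal value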
    assert isinstance(features, dict)
    assert "default" in features.keys()
    assert isinstance(features["default"], np.ndarray)
    assert features["default"].shape == (6, 2)
    assert features["default"].ravel().tolist() == [
        1.0,
        0.0,
        0.0,
        1.0,
        1.0,
        0.0,
        1.0,
        0.0,
        0.0,
        1.0,
        1.0,
        0.0,
    ]


def test_instance_features_extractor():
    instances, models = _get_instances()
    features = InstanceFeaturesExtractor().extract(instances)
    assert features.shape == (2, 3)


def test_variable_features_extractor():
    instances, models = _get_instances()
    features = VariableFeaturesExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features
    assert features["default"].shape == (6, 5)