Mirror of https://github.com/ANL-CEEESA/MIPLearn.git
Add types to remaining files; activate mypy's disallow_untyped_defs
@@ -1,6 +1,6 @@
 [mypy]
 ignore_missing_imports = True
-#disallow_untyped_defs = True
+disallow_untyped_defs = True
 disallow_untyped_calls = True
 disallow_incomplete_defs = True
 pretty = True
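Note: with disallow_untyped_defs no longer commented out, mypy rejects any function definition that lacks annotations, which is what forces the annotations added throughout the rest of this commit. A minimal sketch (not from the repository) of what the three flags accept and reject:

# Illustrative functions only; not part of MIPLearn.

def scale(x: float, factor: float = 2.0) -> float:
    # Fully annotated: accepted under disallow_untyped_defs.
    return x * factor


def scale_untyped(x, factor=2.0):  # flagged by disallow_untyped_defs
    return x * factor


def scale_incomplete(x: float, factor=2.0) -> float:  # flagged by disallow_incomplete_defs
    return x * factor


def caller() -> float:
    # Calling an *untyped* function from annotated code would be flagged by
    # disallow_untyped_calls; calling the annotated one is fine.
    return scale(3.0)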

Makefile
@@ -44,6 +44,7 @@ reformat:
 test:
 	$(MYPY) -p miplearn
 	$(MYPY) -p tests
+	$(MYPY) -p benchmark
 	$(PYTEST) $(PYTEST_ARGS)
 
 .PHONY: test test-watch docs install

benchmark/__init__.py (new empty file)
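Note: the empty benchmark/__init__.py appears to exist only so that $(MYPY) -p benchmark can resolve the benchmark directory, since mypy's -p flag checks a package by import name rather than by path. A hypothetical layout (the script's module name is an assumption, not taken from the commit):

# Hypothetical layout, for illustration only:
#
#   benchmark/
#       __init__.py    # empty; makes "benchmark" an importable package for `mypy -p benchmark`
#       benchmark.py   # assumed name for the docopt-based script whose hunks follow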
@@ -24,6 +24,7 @@ import importlib
 import logging
 import os
 from pathlib import Path
+from typing import Dict
 
 import matplotlib.pyplot as plt
 import pandas as pd
@@ -46,7 +47,7 @@ logging.getLogger("pyomo.core").setLevel(logging.ERROR)
 logger = logging.getLogger("benchmark")
 
 
-def train(args):
+def train(args: Dict) -> None:
     basepath = args["<challenge>"]
     problem_name, challenge_name = args["<challenge>"].split("/")
     pkg = importlib.import_module(f"miplearn.problems.{problem_name}")
@@ -76,7 +77,7 @@ def train(args):
     Path(done_filename).touch(exist_ok=True)
 
 
-def test_baseline(args):
+def test_baseline(args: Dict) -> None:
     basepath = args["<challenge>"]
     test_instances = [PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz")]
     csv_filename = f"{basepath}/benchmark_baseline.csv"
@@ -99,7 +100,7 @@ def test_baseline(args):
     benchmark.write_csv(csv_filename)
 
 
-def test_ml(args):
+def test_ml(args: Dict) -> None:
     basepath = args["<challenge>"]
     test_instances = [PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz")]
     train_instances = [PickleGzInstance(f) for f in glob.glob(f"{basepath}/train/*.gz")]
@@ -133,7 +134,7 @@ def test_ml(args):
     benchmark.write_csv(csv_filename)
 
 
-def charts(args):
+def charts(args: Dict) -> None:
     basepath = args["<challenge>"]
     sns.set_style("whitegrid")
     sns.set_palette("Blues_r")
@@ -244,7 +245,7 @@ def charts(args):
     )
 
 
-if __name__ == "__main__":
+def main() -> None:
     args = docopt(__doc__)
     if args["train"]:
         train(args)
@@ -254,3 +255,7 @@ if __name__ == "__main__":
         test_ml(args)
     if args["charts"]:
         charts(args)
+
+
+if __name__ == "__main__":
+    main()
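Note: moving the top-level code into a typed main() keeps the command dispatch inside a function the new flags actually check (module-level statements are not covered by disallow_untyped_defs). A standalone sketch of the same pattern, with an invented docopt usage string and command names:

# Sketch of the docopt + typed-main pattern used above; everything here is illustrative.
from typing import Dict

from docopt import docopt

USAGE = """Example tool.

Usage:
    tool.py greet <name>
"""


def greet(args: Dict) -> None:
    # docopt returns a plain dict of parsed arguments.
    print(f"Hello, {args['<name>']}!")


def main() -> None:
    args = docopt(USAGE)
    if args["greet"]:
        greet(args)


if __name__ == "__main__":
    main()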
@@ -10,7 +10,7 @@ from miplearn.classifiers.counting import CountingClassifier
 E = 0.1
 
 
-def test_counting():
+def test_counting() -> None:
     clf = CountingClassifier()
     n_features = 25
     x_train = np.zeros((8, n_features))
@@ -10,7 +10,7 @@ from sklearn.neighbors import KNeighborsClassifier
 from miplearn.classifiers.sklearn import ScikitLearnClassifier, ScikitLearnRegressor
 
 
-def test_constant_prediction():
+def test_constant_prediction() -> None:
     x_train = np.array([[0.0, 1.0], [1.0, 0.0]])
     y_train = np.array([[True, False], [True, False]])
     clf = ScikitLearnClassifier(KNeighborsClassifier(n_neighbors=1))
@@ -22,7 +22,7 @@ def test_constant_prediction():
     )
 
 
-def test_regressor():
+def test_regressor() -> None:
     x_train = np.array([[0.0, 1.0], [1.0, 4.0], [2.0, 2.0]])
     y_train = np.array([[1.0], [5.0], [4.0]])
     x_test = np.array([[4.0, 4.0], [0.0, 0.0]])
@@ -10,7 +10,7 @@ from miplearn.classifiers import Classifier
 from miplearn.classifiers.threshold import MinPrecisionThreshold
 
 
-def test_threshold_dynamic():
+def test_threshold_dynamic() -> None:
     clf = Mock(spec=Classifier)
     clf.predict_proba = Mock(
         return_value=np.array(
@@ -1,14 +1,16 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import Dict, Tuple
 from unittest.mock import Mock
 
 from miplearn.components.component import Component
+from miplearn.features import Features, TrainingSample
 from miplearn.instance.base import Instance
 
 
-def test_xy_instance():
-    def _sample_xy(features, sample):
+def test_xy_instance() -> None:
+    def _sample_xy(features: Features, sample: str) -> Tuple[Dict, Dict]:
         x = {
             "s1": {
                 "category_a": [
@@ -58,7 +60,7 @@ def test_xy_instance():
     instance_2 = Mock(spec=Instance)
     instance_2.training_data = ["s3"]
     instance_2.features = {}
-    comp.sample_xy = _sample_xy
+    comp.sample_xy = _sample_xy  # type: ignore
     x_expected = {
         "category_a": [
             [1, 2, 3],
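Note: the # type: ignore on comp.sample_xy = _sample_xy is there because mypy rejects assigning to a method on an instance. A minimal sketch of the same situation, with placeholder classes rather than MIPLearn's:

# Illustrative only; the classes here are stand-ins.
from typing import Dict, Tuple


class FakeComponent:
    def sample_xy(self, features: Dict, sample: str) -> Tuple[Dict, Dict]:
        return {}, {}


def _fake_sample_xy(features: Dict, sample: str) -> Tuple[Dict, Dict]:
    return {"x": {}}, {"y": {}}


comp = FakeComponent()
# mypy: "Cannot assign to a method"; the ignore silences just this line.
comp.sample_xy = _fake_sample_xy  # type: ignore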
@@ -3,7 +3,7 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 import logging
-from typing import Any, FrozenSet, Hashable
+from typing import Any, FrozenSet, Hashable, List
 
 import gurobipy as gp
 import networkx as nx
@@ -39,7 +39,7 @@ class GurobiStableSetProblem(Instance):
         return True
 
     @overrides
-    def find_violated_user_cuts(self, model):
+    def find_violated_user_cuts(self, model: Any) -> List[FrozenSet]:
         assert isinstance(model, gp.Model)
         vals = model.cbGetNodeRel(model.getVars())
         violations = []
@@ -1,6 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import cast
 from unittest.mock import Mock
 
 import numpy as np
@@ -179,9 +180,9 @@ def test_predict() -> None:
     }
 
 
-def test_fit_xy():
+def test_fit_xy() -> None:
     clf = Mock(spec=Classifier)
-    clf.clone = lambda: Mock(spec=Classifier)
+    clf.clone = lambda: Mock(spec=Classifier)  # type: ignore
     thr = Mock(spec=Threshold)
     thr.clone = lambda: Mock(spec=Threshold)
     comp = PrimalSolutionComponent(classifier=clf, threshold=thr)
@@ -197,17 +198,17 @@ def test_fit_xy():
     for category in ["type-a", "type-b"]:
         assert category in comp.classifiers
         assert category in comp.thresholds
-        clf = comp.classifiers[category]
+        clf = comp.classifiers[category]  # type: ignore
         clf.fit.assert_called_once()
         assert_array_equal(x[category], clf.fit.call_args[0][0])
         assert_array_equal(y[category], clf.fit.call_args[0][1])
-        thr = comp.thresholds[category]
+        thr = comp.thresholds[category]  # type: ignore
         thr.fit.assert_called_once()
         assert_array_equal(x[category], thr.fit.call_args[0][1])
         assert_array_equal(y[category], thr.fit.call_args[0][2])
 
 
-def test_usage():
+def test_usage() -> None:
     solver = LearningSolver(
         components=[
             PrimalSolutionComponent(),
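Note: the ignores in test_fit_xy appear to be needed because clf and thr are first bound to Mock objects and later re-bound to whatever the component stores, and mypy pins a variable to the type of its first assignment; the cast import added at the top of the file suggests the same problem is handled that way elsewhere (not shown in these hunks). A hedged sketch of both approaches, with a placeholder class:

# Placeholder class; only the mypy-facing pattern matters here.
from typing import cast
from unittest.mock import Mock


class FakeClassifier:
    def fit(self) -> None:
        pass


def stored_classifier() -> FakeClassifier:
    # At runtime this is a Mock standing in for a trained classifier;
    # the ignore is needed because Mock is not a FakeClassifier to mypy.
    return Mock(spec=FakeClassifier)  # type: ignore


clf = Mock(spec=FakeClassifier)      # mypy pins clf to Mock here
clf = stored_classifier()  # type: ignore  # re-binding to FakeClassifier; mypy objects without it
clf.fit()
clf.fit.assert_called_once()         # Mock-only API; fine at runtime

# Alternative: cast tells mypy explicitly what the stored object really is.
mock_clf = cast(Mock, stored_classifier())
mock_clf.fit()
mock_clf.fit.assert_called_once()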

tests/fixtures/infeasible.py (vendored)
@@ -36,8 +36,9 @@ class InfeasibleGurobiInstance(Instance):
         return model
 
 
-def get_infeasible_instance(solver):
+def get_infeasible_instance(solver: Any) -> Instance:
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return InfeasiblePyomoInstance()
     if _is_subclass_or_instance(solver, GurobiSolver):
         return InfeasibleGurobiInstance()
+    assert False

tests/fixtures/knapsack.py (vendored)
@@ -1,6 +1,9 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+
+from typing import List, Any, Tuple
+
 from miplearn.instance.base import Instance
 from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
 from miplearn.solvers.gurobi import GurobiSolver
@@ -10,7 +13,7 @@ from miplearn.solvers.pyomo.base import BasePyomoSolver
 from tests.solvers import _is_subclass_or_instance
 
 
-def get_test_pyomo_instances():
+def get_test_pyomo_instances() -> Tuple[List[Instance], List[Any]]:
     instances = [
         KnapsackInstance(
             weights=[23.0, 26.0, 20.0, 18.0],

tests/fixtures/redundant.py (vendored)
@@ -34,8 +34,9 @@ class GurobiInstanceWithRedundancy(Instance):
         return model
 
 
-def get_instance_with_redundancy(solver):
+def get_instance_with_redundancy(solver: Any) -> Instance:
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return PyomoInstanceWithRedundancy()
     if _is_subclass_or_instance(solver, GurobiSolver):
         return GurobiInstanceWithRedundancy()
+    assert False
@@ -8,7 +8,7 @@ from scipy.stats import uniform, randint
 from miplearn.problems.knapsack import MultiKnapsackGenerator
 
 
-def test_knapsack_generator():
+def test_knapsack_generator() -> None:
     gen = MultiKnapsackGenerator(
         n=randint(low=100, high=101),
         m=randint(low=30, high=31),
@@ -20,5 +20,5 @@ def test_knapsack_generator():
     instances = gen.generate(100)
     w_sum = sum(instance.weights for instance in instances) / len(instances)
     b_sum = sum(instance.capacities for instance in instances) / len(instances)
-    assert round(np.mean(w_sum), -1) == 500.0
-    assert round(np.mean(b_sum), -3) == 25000.0
+    assert round(float(np.mean(w_sum)), -1) == 500.0
+    assert round(float(np.mean(b_sum)), -3) == 25000.0
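Note: wrapping np.mean(...) in float() before round(...) is the usual way to satisfy mypy here; under the numpy stubs np.mean does not return a plain float, and the explicit conversion gives round() an unambiguous builtin type without changing the value. A small self-contained sketch:

# Sketch of the float(...) cast used above; the data is made up.
import numpy as np

weights = np.array([490.0, 500.0, 510.0])

# np.mean returns a numpy scalar rather than a builtin float;
# converting first keeps mypy happy and leaves the value unchanged.
mean_weight = float(np.mean(weights))
assert round(mean_weight, -1) == 500.0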
@@ -10,16 +10,16 @@ from miplearn.problems.stab import MaxWeightStableSetInstance
 from miplearn.solvers.learning import LearningSolver
 
 
-def test_stab():
+def test_stab() -> None:
     graph = nx.cycle_graph(5)
-    weights = [1.0, 1.0, 1.0, 1.0, 1.0]
+    weights = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
     instance = MaxWeightStableSetInstance(graph, weights)
     solver = LearningSolver()
     stats = solver.solve(instance)
     assert stats["Lower bound"] == 2.0
 
 
-def test_stab_generator_fixed_graph():
+def test_stab_generator_fixed_graph() -> None:
     np.random.seed(42)
     from miplearn.problems.stab import MaxWeightStableSetGenerator
 
@@ -36,7 +36,7 @@ def test_stab_generator_fixed_graph():
     assert list(weights_avg_actual) == weights_avg_expected
 
 
-def test_stab_generator_random_graph():
+def test_stab_generator_random_graph() -> None:
     np.random.seed(42)
     from miplearn.problems.stab import MaxWeightStableSetGenerator
 
@@ -11,7 +11,7 @@ from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanI
 from miplearn.solvers.learning import LearningSolver
 
 
-def test_generator():
+def test_generator() -> None:
     instances = TravelingSalesmanGenerator(
         x=uniform(loc=0.0, scale=1000.0),
         y=uniform(loc=0.0, scale=1000.0),
@@ -26,7 +26,7 @@ def test_generator():
     assert np.std(d) > 0
 
 
-def test_instance():
+def test_instance() -> None:
     n_cities = 4
     distances = np.array(
         [
@@ -40,6 +40,7 @@ def test_instance():
     solver = LearningSolver()
     stats = solver.solve(instance)
     solution = instance.training_data[0].solution
+    assert solution is not None
     assert solution["x[(0, 1)]"] == 1.0
     assert solution["x[(0, 2)]"] == 0.0
     assert solution["x[(0, 3)]"] == 1.0
@@ -50,7 +51,7 @@ def test_instance():
     assert stats["Upper bound"] == 4.0
 
 
-def test_subtour():
+def test_subtour() -> None:
     n_cities = 6
     cities = np.array(
         [
@@ -66,8 +67,10 @@ def test_subtour():
     instance = TravelingSalesmanInstance(n_cities, distances)
     solver = LearningSolver()
     solver.solve(instance)
+    assert instance.training_data[0].lazy_enforced is not None
     assert len(instance.training_data[0].lazy_enforced) > 0
     solution = instance.training_data[0].solution
+    assert solution is not None
     assert solution["x[(0, 1)]"] == 1.0
     assert solution["x[(0, 4)]"] == 1.0
     assert solution["x[(1, 2)]"] == 1.0
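Note: the added "assert ... is not None" lines are not just extra checks. When a field is typed as Optional[...], mypy needs the assert to narrow the type before the dictionary lookups and len(...) calls that follow are allowed. A generic sketch of the narrowing (the Solution alias is a stand-in, not MIPLearn's type):

# Generic sketch of Optional narrowing.
from typing import Dict, Optional

Solution = Dict[str, float]


def get_solution(found: bool) -> Optional[Solution]:
    return {"x[0]": 1.0} if found else None


solution = get_solution(found=True)
# Without this assert, mypy flags the subscript below as an operation on an Optional value.
assert solution is not None
assert solution["x[0]"] == 1.0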
@@ -5,6 +5,7 @@
 from inspect import isclass
 from typing import List, Callable, Any
 
+from miplearn.instance.base import Instance
 from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
 from miplearn.solvers.gurobi import GurobiSolver
 from miplearn.solvers.internal import InternalSolver
@@ -19,7 +20,7 @@ def _is_subclass_or_instance(obj: Any, parent_class: Any) -> bool:
     )
 
 
-def _get_knapsack_instance(solver):
+def _get_knapsack_instance(solver: Any) -> Instance:
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return KnapsackInstance(
             weights=[23.0, 26.0, 20.0, 18.0],
@@ -20,7 +20,7 @@ from ..fixtures.infeasible import get_infeasible_instance
 logger = logging.getLogger(__name__)
 
 
-def test_redirect_output():
+def test_redirect_output() -> None:
     import sys
 
     original_stdout = sys.stdout
@@ -31,7 +31,7 @@ def test_redirect_output():
     assert io.getvalue() == "Hello world\n"
 
 
-def test_internal_solver_warm_starts():
+def test_internal_solver_warm_starts() -> None:
     for solver in get_internal_solvers():
         logger.info("Solver: %s" % solver)
         instance = _get_knapsack_instance(solver)
@@ -54,7 +54,7 @@ def test_internal_solver_warm_starts():
         assert stats["Upper bound"] == 725.0
 
 
-def test_internal_solver():
+def test_internal_solver() -> None:
     for solver in get_internal_solvers():
         logger.info("Solver: %s" % solver)
 
@@ -64,26 +64,37 @@ def test_internal_solver():
 
         assert solver.get_variable_names() == ["x[0]", "x[1]", "x[2]", "x[3]"]
 
-        stats = solver.solve_lp()
+        lp_stats = solver.solve_lp()
         assert not solver.is_infeasible()
-        assert round(stats["LP value"], 3) == 1287.923
-        assert len(stats["LP log"]) > 100
+        assert lp_stats["LP value"] is not None
+        assert round(lp_stats["LP value"], 3) == 1287.923
+        assert len(lp_stats["LP log"]) > 100
 
         solution = solver.get_solution()
+        assert solution is not None
+        assert solution["x[0]"] is not None
+        assert solution["x[1]"] is not None
+        assert solution["x[2]"] is not None
+        assert solution["x[3]"] is not None
         assert round(solution["x[0]"], 3) == 1.000
        assert round(solution["x[1]"], 3) == 0.923
        assert round(solution["x[2]"], 3) == 1.000
        assert round(solution["x[3]"], 3) == 0.000
 
-        stats = solver.solve(tee=True)
+        mip_stats = solver.solve(tee=True)
         assert not solver.is_infeasible()
-        assert len(stats["MIP log"]) > 100
-        assert stats["Lower bound"] == 1183.0
-        assert stats["Upper bound"] == 1183.0
-        assert stats["Sense"] == "max"
-        assert isinstance(stats["Wallclock time"], float)
+        assert len(mip_stats["MIP log"]) > 100
+        assert mip_stats["Lower bound"] == 1183.0
+        assert mip_stats["Upper bound"] == 1183.0
+        assert mip_stats["Sense"] == "max"
+        assert isinstance(mip_stats["Wallclock time"], float)
 
         solution = solver.get_solution()
+        assert solution is not None
+        assert solution["x[0]"] is not None
+        assert solution["x[1]"] is not None
+        assert solution["x[2]"] is not None
+        assert solution["x[3]"] is not None
         assert solution["x[0]"] == 1.0
         assert solution["x[1]"] == 0.0
         assert solution["x[2]"] == 1.0
@@ -143,43 +154,45 @@ def test_internal_solver():
         solver.relax()
         solver.set_constraint_sense("cut", "=")
         stats = solver.solve()
+        assert stats["Lower bound"] is not None
         assert round(stats["Lower bound"]) == 1030.0
         assert round(solver.get_dual("eq_capacity")) == 0.0
 
 
-def test_relax():
+def test_relax() -> None:
     for solver in get_internal_solvers():
         instance = _get_knapsack_instance(solver)
         solver.set_instance(instance)
         solver.relax()
         stats = solver.solve()
+        assert stats["Lower bound"] is not None
         assert round(stats["Lower bound"]) == 1288.0
 
 
-def test_infeasible_instance():
+def test_infeasible_instance() -> None:
     for solver in get_internal_solvers():
         instance = get_infeasible_instance(solver)
         solver.set_instance(instance)
-        stats = solver.solve()
+        mip_stats = solver.solve()
 
         assert solver.is_infeasible()
         assert solver.get_solution() is None
-        assert stats["Upper bound"] is None
-        assert stats["Lower bound"] is None
+        assert mip_stats["Upper bound"] is None
+        assert mip_stats["Lower bound"] is None
 
-        stats = solver.solve_lp()
+        lp_stats = solver.solve_lp()
         assert solver.get_solution() is None
-        assert stats["LP value"] is None
+        assert lp_stats["LP value"] is None
 
 
-def test_iteration_cb():
+def test_iteration_cb() -> None:
     for solver in get_internal_solvers():
         logger.info("Solver: %s" % solver)
         instance = _get_knapsack_instance(solver)
         solver.set_instance(instance)
         count = 0
 
-        def custom_iteration_cb():
+        def custom_iteration_cb() -> bool:
            nonlocal count
            count += 1
            return count < 5
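Note: renaming the reused stats variable to lp_stats / mip_stats matters under the stricter settings. mypy types a variable from its first assignment, so reusing one name for the result of solve_lp() and then solve() (which, judging by these hunks, carry different statistics) produces assignment errors; separate names, plus the "is not None" narrowing asserts, avoid them. A generic sketch of the variable-reuse problem (the TypedDicts below are invented for illustration):

# Invented stand-ins for the two statistics dictionaries.
from typing import Optional, TypedDict


class LPStats(TypedDict):
    lp_value: Optional[float]


class MIPStats(TypedDict):
    lower_bound: Optional[float]


def solve_lp() -> LPStats:
    return {"lp_value": 1287.923}


def solve() -> MIPStats:
    return {"lower_bound": 1183.0}


lp_stats = solve_lp()   # one name per result type keeps mypy happy
mip_stats = solve()     # reusing "lp_stats" here would be an incompatible assignment

assert lp_stats["lp_value"] is not None       # narrows Optional[float] to float
assert round(lp_stats["lp_value"], 3) == 1287.923
assert mip_stats["lower_bound"] == 1183.0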
@@ -3,19 +3,21 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 import logging
+from typing import Any
 
+from miplearn import InternalSolver
 from miplearn.solvers.gurobi import GurobiSolver
 from . import _get_knapsack_instance
 
 logger = logging.getLogger(__name__)
 
 
-def test_lazy_cb():
+def test_lazy_cb() -> None:
     solver = GurobiSolver()
     instance = _get_knapsack_instance(solver)
     model = instance.to_model()
 
-    def lazy_cb(cb_solver, cb_model):
+    def lazy_cb(cb_solver: InternalSolver, cb_model: Any) -> None:
         cobj = (cb_model.getVarByName("x[0]") * 1.0, "<", 0.0, "cut")
         if not cb_solver.is_constraint_satisfied(cobj):
             cb_solver.add_constraint(cobj)
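Note: disallow_untyped_defs applies to nested functions too, so the lazy_cb callback must be annotated even though it is only handed to the solver; concrete parameter types (InternalSolver, Any) also let mypy check its body. A generic sketch of typing a callback and the Callable parameter that receives it (names are illustrative, not MIPLearn's API):

# Generic sketch; the solver/model types below are placeholders.
from typing import Any, Callable


class FakeSolver:
    def add_constraint(self, constraint: Any) -> None:
        print(f"added {constraint!r}")


def solve(model: Any, lazy_cb: Callable[[FakeSolver, Any], None]) -> None:
    # A real solver would invoke the callback from inside branch-and-cut.
    lazy_cb(FakeSolver(), model)


def my_lazy_cb(cb_solver: FakeSolver, cb_model: Any) -> None:
    # Fully annotated, so it satisfies disallow_untyped_defs.
    cb_solver.add_constraint(("x[0]", "<", 0.0))


solve(model="toy-model", lazy_cb=my_lazy_cb)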
@@ -16,7 +16,7 @@ from . import _get_knapsack_instance, get_internal_solvers
 logger = logging.getLogger(__name__)
 
 
-def test_learning_solver():
+def test_learning_solver() -> None:
     for mode in ["exact", "heuristic"]:
         for internal_solver in get_internal_solvers():
             logger.info("Solver: %s" % internal_solver)
@@ -30,17 +30,21 @@ def test_learning_solver():
             assert hasattr(instance, "features")
 
             sample = instance.training_data[0]
+            assert sample.solution is not None
             assert sample.solution["x[0]"] == 1.0
             assert sample.solution["x[1]"] == 0.0
             assert sample.solution["x[2]"] == 1.0
             assert sample.solution["x[3]"] == 1.0
             assert sample.lower_bound == 1183.0
             assert sample.upper_bound == 1183.0
+            assert sample.lp_solution is not None
             assert round(sample.lp_solution["x[0]"], 3) == 1.000
             assert round(sample.lp_solution["x[1]"], 3) == 0.923
             assert round(sample.lp_solution["x[2]"], 3) == 1.000
             assert round(sample.lp_solution["x[3]"], 3) == 0.000
+            assert sample.lp_value is not None
             assert round(sample.lp_value, 3) == 1287.923
+            assert sample.mip_log is not None
             assert len(sample.mip_log) > 100
 
             solver.fit([instance])
@@ -51,7 +55,7 @@ def test_learning_solver():
                 dill.dump(solver, file)
 
 
-def test_solve_without_lp():
+def test_solve_without_lp() -> None:
     for internal_solver in get_internal_solvers():
         logger.info("Solver: %s" % internal_solver)
         instance = _get_knapsack_instance(internal_solver)
@@ -64,7 +68,7 @@ def test_solve_without_lp():
         solver.solve(instance)
 
 
-def test_parallel_solve():
+def test_parallel_solve() -> None:
     for internal_solver in get_internal_solvers():
         instances = [_get_knapsack_instance(internal_solver) for _ in range(10)]
         solver = LearningSolver(solver=internal_solver)
@@ -72,10 +76,11 @@ def test_parallel_solve():
         assert len(results) == 10
         for instance in instances:
             data = instance.training_data[0]
+            assert data.solution is not None
             assert len(data.solution.keys()) == 4
 
 
-def test_solve_fit_from_disk():
+def test_solve_fit_from_disk() -> None:
     for internal_solver in get_internal_solvers():
         # Create instances and pickle them
         instances = []
@@ -108,7 +113,7 @@ def test_solve_fit_from_disk():
            os.remove(instance.filename)
 
 
-def test_simulate_perfect():
+def test_simulate_perfect() -> None:
     internal_solver = GurobiSolver()
     instance = _get_knapsack_instance(internal_solver)
     with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as tmp:
@@ -121,7 +126,7 @@ def test_simulate_perfect():
     assert stats["Lower bound"] == stats["Objective: Predicted lower bound"]
 
 
-def test_gap():
+def test_gap() -> None:
     assert LearningSolver._compute_gap(ub=0.0, lb=0.0) == 0.0
     assert LearningSolver._compute_gap(ub=1.0, lb=0.5) == 0.5
     assert LearningSolver._compute_gap(ub=1.0, lb=1.0) == 0.0
@@ -11,7 +11,7 @@ from miplearn.problems.stab import MaxWeightStableSetGenerator
 from miplearn.solvers.learning import LearningSolver
 
 
-def test_benchmark():
+def test_benchmark() -> None:
     for n_jobs in [1, 4]:
         # Generate training and test instances
         generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))