From e9cd6d1715841a2e968907fc5ee80503c0c5f36f Mon Sep 17 00:00:00 2001
From: "Alinson S. Xavier"
Date: Wed, 7 Apr 2021 21:25:30 -0500
Subject: [PATCH] Add types to remaining files; activate mypy's disallow_untyped_defs

---
 .mypy.ini                                  |  2 +-
 Makefile                                   |  1 +
 benchmark/__init__.py                      |  0
 benchmark/benchmark.py                     | 15 ++++--
 tests/classifiers/test_counting.py         |  2 +-
 tests/classifiers/test_sklearn.py          |  4 +-
 tests/classifiers/test_threshold.py        |  2 +-
 tests/components/test_component.py         |  8 ++--
 tests/components/test_dynamic_user_cuts.py |  4 +-
 tests/components/test_primal.py            | 11 +++--
 tests/fixtures/infeasible.py               |  3 +-
 tests/fixtures/knapsack.py                 |  5 +-
 tests/fixtures/redundant.py                |  3 +-
 tests/problems/test_knapsack.py            |  6 +--
 tests/problems/test_stab.py                |  8 ++--
 tests/problems/test_tsp.py                 |  9 ++--
 tests/solvers/__init__.py                  |  3 +-
 tests/solvers/test_internal_solver.py      | 55 +++++++++++++---------
 tests/solvers/test_lazy_cb.py              |  6 ++-
 tests/solvers/test_learning_solver.py      | 17 ++++---
 tests/test_benchmark.py                    |  2 +-
 21 files changed, 102 insertions(+), 64 deletions(-)
 create mode 100644 benchmark/__init__.py

diff --git a/.mypy.ini b/.mypy.ini
index 62d41bc..7bd8f7a 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -1,6 +1,6 @@
 [mypy]
 ignore_missing_imports = True
-#disallow_untyped_defs = True
+disallow_untyped_defs = True
 disallow_untyped_calls = True
 disallow_incomplete_defs = True
 pretty = True
diff --git a/Makefile b/Makefile
index b900f90..91fa67f 100644
--- a/Makefile
+++ b/Makefile
@@ -44,6 +44,7 @@ reformat:
 test:
 	$(MYPY) -p miplearn
 	$(MYPY) -p tests
+	$(MYPY) -p benchmark
 	$(PYTEST) $(PYTEST_ARGS)
 
 .PHONY: test test-watch docs install
diff --git a/benchmark/__init__.py b/benchmark/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index 77973d2..3e489dc 100755
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -24,6 +24,7 @@ import importlib
 import logging
 import os
 from pathlib import Path
+from typing import Dict
 
 import matplotlib.pyplot as plt
 import pandas as pd
@@ -46,7 +47,7 @@ logging.getLogger("pyomo.core").setLevel(logging.ERROR)
 logger = logging.getLogger("benchmark")
 
 
-def train(args):
+def train(args: Dict) -> None:
     basepath = args[""]
     problem_name, challenge_name = args[""].split("/")
     pkg = importlib.import_module(f"miplearn.problems.{problem_name}")
@@ -76,7 +77,7 @@ def train(args):
     Path(done_filename).touch(exist_ok=True)
 
 
-def test_baseline(args):
+def test_baseline(args: Dict) -> None:
     basepath = args[""]
     test_instances = [PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz")]
     csv_filename = f"{basepath}/benchmark_baseline.csv"
@@ -99,7 +100,7 @@ def test_baseline(args):
     benchmark.write_csv(csv_filename)
 
 
-def test_ml(args):
+def test_ml(args: Dict) -> None:
     basepath = args[""]
     test_instances = [PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz")]
     train_instances = [PickleGzInstance(f) for f in glob.glob(f"{basepath}/train/*.gz")]
@@ -133,7 +134,7 @@ def test_ml(args):
     benchmark.write_csv(csv_filename)
 
 
-def charts(args):
+def charts(args: Dict) -> None:
     basepath = args[""]
     sns.set_style("whitegrid")
     sns.set_palette("Blues_r")
@@ -244,7 +245,7 @@ def charts(args):
     )
 
 
-if __name__ == "__main__":
+def main() -> None:
     args = docopt(__doc__)
     if args["train"]:
         train(args)
@@ -254,3 +255,7 @@ if __name__ == "__main__":
         test_ml(args)
     if args["charts"]:
         charts(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/classifiers/test_counting.py b/tests/classifiers/test_counting.py
index 160a0d5..5030002 100644
--- a/tests/classifiers/test_counting.py
+++ b/tests/classifiers/test_counting.py
@@ -10,7 +10,7 @@ from miplearn.classifiers.counting import CountingClassifier
 E = 0.1
 
 
-def test_counting():
+def test_counting() -> None:
     clf = CountingClassifier()
     n_features = 25
     x_train = np.zeros((8, n_features))
diff --git a/tests/classifiers/test_sklearn.py b/tests/classifiers/test_sklearn.py
index 0c0d429..b281e0e 100644
--- a/tests/classifiers/test_sklearn.py
+++ b/tests/classifiers/test_sklearn.py
@@ -10,7 +10,7 @@ from sklearn.neighbors import KNeighborsClassifier
 from miplearn.classifiers.sklearn import ScikitLearnClassifier, ScikitLearnRegressor
 
 
-def test_constant_prediction():
+def test_constant_prediction() -> None:
     x_train = np.array([[0.0, 1.0], [1.0, 0.0]])
     y_train = np.array([[True, False], [True, False]])
     clf = ScikitLearnClassifier(KNeighborsClassifier(n_neighbors=1))
@@ -22,7 +22,7 @@ def test_constant_prediction():
     )
 
 
-def test_regressor():
+def test_regressor() -> None:
     x_train = np.array([[0.0, 1.0], [1.0, 4.0], [2.0, 2.0]])
     y_train = np.array([[1.0], [5.0], [4.0]])
     x_test = np.array([[4.0, 4.0], [0.0, 0.0]])
diff --git a/tests/classifiers/test_threshold.py b/tests/classifiers/test_threshold.py
index e88209c..821e2be 100644
--- a/tests/classifiers/test_threshold.py
+++ b/tests/classifiers/test_threshold.py
@@ -10,7 +10,7 @@ from miplearn.classifiers import Classifier
 from miplearn.classifiers.threshold import MinPrecisionThreshold
 
 
-def test_threshold_dynamic():
+def test_threshold_dynamic() -> None:
     clf = Mock(spec=Classifier)
     clf.predict_proba = Mock(
         return_value=np.array(
diff --git a/tests/components/test_component.py b/tests/components/test_component.py
index 5fd00d7..02d1b12 100644
--- a/tests/components/test_component.py
+++ b/tests/components/test_component.py
@@ -1,14 +1,16 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import Dict, Tuple
 from unittest.mock import Mock
 
 from miplearn.components.component import Component
+from miplearn.features import Features, TrainingSample
 from miplearn.instance.base import Instance
 
 
-def test_xy_instance():
-    def _sample_xy(features, sample):
+def test_xy_instance() -> None:
+    def _sample_xy(features: Features, sample: str) -> Tuple[Dict, Dict]:
         x = {
             "s1": {
                 "category_a": [
@@ -58,7 +60,7 @@ def test_xy_instance():
     instance_2 = Mock(spec=Instance)
     instance_2.training_data = ["s3"]
     instance_2.features = {}
-    comp.sample_xy = _sample_xy
+    comp.sample_xy = _sample_xy  # type: ignore
     x_expected = {
         "category_a": [
             [1, 2, 3],
diff --git a/tests/components/test_dynamic_user_cuts.py b/tests/components/test_dynamic_user_cuts.py
index f892db9..a1da5fb 100644
--- a/tests/components/test_dynamic_user_cuts.py
+++ b/tests/components/test_dynamic_user_cuts.py
@@ -3,7 +3,7 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 import logging
-from typing import Any, FrozenSet, Hashable
+from typing import Any, FrozenSet, Hashable, List
 
 import gurobipy as gp
 import networkx as nx
@@ -39,7 +39,7 @@ class GurobiStableSetProblem(Instance):
         return True
 
     @overrides
-    def find_violated_user_cuts(self, model):
+    def find_violated_user_cuts(self, model: Any) -> List[FrozenSet]:
         assert isinstance(model, gp.Model)
         vals = model.cbGetNodeRel(model.getVars())
         violations = []
diff --git a/tests/components/test_primal.py b/tests/components/test_primal.py
index 8ad9795..9cd8a04 100644
--- a/tests/components/test_primal.py
+++ b/tests/components/test_primal.py
@@ -1,6 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import cast
 from unittest.mock import Mock
 
 import numpy as np
@@ -179,9 +180,9 @@ def test_predict() -> None:
     }
 
 
-def test_fit_xy():
+def test_fit_xy() -> None:
     clf = Mock(spec=Classifier)
-    clf.clone = lambda: Mock(spec=Classifier)
+    clf.clone = lambda: Mock(spec=Classifier)  # type: ignore
     thr = Mock(spec=Threshold)
     thr.clone = lambda: Mock(spec=Threshold)
     comp = PrimalSolutionComponent(classifier=clf, threshold=thr)
@@ -197,17 +198,17 @@ def test_fit_xy():
     for category in ["type-a", "type-b"]:
         assert category in comp.classifiers
         assert category in comp.thresholds
-        clf = comp.classifiers[category]
+        clf = comp.classifiers[category]  # type: ignore
         clf.fit.assert_called_once()
         assert_array_equal(x[category], clf.fit.call_args[0][0])
         assert_array_equal(y[category], clf.fit.call_args[0][1])
-        thr = comp.thresholds[category]
+        thr = comp.thresholds[category]  # type: ignore
         thr.fit.assert_called_once()
         assert_array_equal(x[category], thr.fit.call_args[0][1])
         assert_array_equal(y[category], thr.fit.call_args[0][2])
 
 
-def test_usage():
+def test_usage() -> None:
     solver = LearningSolver(
         components=[
             PrimalSolutionComponent(),
diff --git a/tests/fixtures/infeasible.py b/tests/fixtures/infeasible.py
index 09ba307..77b5037 100644
--- a/tests/fixtures/infeasible.py
+++ b/tests/fixtures/infeasible.py
@@ -36,8 +36,9 @@ class InfeasibleGurobiInstance(Instance):
         return model
 
 
-def get_infeasible_instance(solver):
+def get_infeasible_instance(solver: Any) -> Instance:
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return InfeasiblePyomoInstance()
     if _is_subclass_or_instance(solver, GurobiSolver):
         return InfeasibleGurobiInstance()
+    assert False
diff --git a/tests/fixtures/knapsack.py b/tests/fixtures/knapsack.py
index 8013d5f..04a9e93 100644
--- a/tests/fixtures/knapsack.py
+++ b/tests/fixtures/knapsack.py
@@ -1,6 +1,9 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+
+from typing import List, Any, Tuple
+
 from miplearn.instance.base import Instance
 from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
 from miplearn.solvers.gurobi import GurobiSolver
@@ -10,7 +13,7 @@ from miplearn.solvers.pyomo.base import BasePyomoSolver
 from tests.solvers import _is_subclass_or_instance
 
 
-def get_test_pyomo_instances():
+def get_test_pyomo_instances() -> Tuple[List[Instance], List[Any]]:
     instances = [
         KnapsackInstance(
             weights=[23.0, 26.0, 20.0, 18.0],
diff --git a/tests/fixtures/redundant.py b/tests/fixtures/redundant.py
index 4deeab0..27f6d96 100644
--- a/tests/fixtures/redundant.py
+++ b/tests/fixtures/redundant.py
@@ -34,8 +34,9 @@ class GurobiInstanceWithRedundancy(Instance):
         return model
 
 
-def get_instance_with_redundancy(solver):
+def get_instance_with_redundancy(solver: Any) -> Instance:
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return PyomoInstanceWithRedundancy()
     if _is_subclass_or_instance(solver, GurobiSolver):
         return GurobiInstanceWithRedundancy()
+    assert False
diff --git a/tests/problems/test_knapsack.py b/tests/problems/test_knapsack.py
index 64d76a1..32ef613 100644
--- a/tests/problems/test_knapsack.py
+++ b/tests/problems/test_knapsack.py
@@ -8,7 +8,7 @@ from scipy.stats import uniform, randint
 from miplearn.problems.knapsack import MultiKnapsackGenerator
 
 
-def test_knapsack_generator():
+def test_knapsack_generator() -> None:
     gen = MultiKnapsackGenerator(
         n=randint(low=100, high=101),
         m=randint(low=30, high=31),
@@ -20,5 +20,5 @@ def test_knapsack_generator():
     instances = gen.generate(100)
     w_sum = sum(instance.weights for instance in instances) / len(instances)
     b_sum = sum(instance.capacities for instance in instances) / len(instances)
-    assert round(np.mean(w_sum), -1) == 500.0
-    assert round(np.mean(b_sum), -3) == 25000.0
+    assert round(float(np.mean(w_sum)), -1) == 500.0
+    assert round(float(np.mean(b_sum)), -3) == 25000.0
diff --git a/tests/problems/test_stab.py b/tests/problems/test_stab.py
index cea4810..b6763bb 100644
--- a/tests/problems/test_stab.py
+++ b/tests/problems/test_stab.py
@@ -10,16 +10,16 @@ from miplearn.problems.stab import MaxWeightStableSetInstance
 from miplearn.solvers.learning import LearningSolver
 
 
-def test_stab():
+def test_stab() -> None:
     graph = nx.cycle_graph(5)
-    weights = [1.0, 1.0, 1.0, 1.0, 1.0]
+    weights = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
     instance = MaxWeightStableSetInstance(graph, weights)
     solver = LearningSolver()
     stats = solver.solve(instance)
     assert stats["Lower bound"] == 2.0
 
 
-def test_stab_generator_fixed_graph():
+def test_stab_generator_fixed_graph() -> None:
     np.random.seed(42)
     from miplearn.problems.stab import MaxWeightStableSetGenerator
 
@@ -36,7 +36,7 @@ def test_stab_generator_fixed_graph():
     assert list(weights_avg_actual) == weights_avg_expected
 
 
-def test_stab_generator_random_graph():
+def test_stab_generator_random_graph() -> None:
     np.random.seed(42)
     from miplearn.problems.stab import MaxWeightStableSetGenerator
 
diff --git a/tests/problems/test_tsp.py b/tests/problems/test_tsp.py
index 0ea1a9f..7b10526 100644
--- a/tests/problems/test_tsp.py
+++ b/tests/problems/test_tsp.py
@@ -11,7 +11,7 @@ from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanI
 from miplearn.solvers.learning import LearningSolver
 
 
-def test_generator():
+def test_generator() -> None:
     instances = TravelingSalesmanGenerator(
         x=uniform(loc=0.0, scale=1000.0),
         y=uniform(loc=0.0, scale=1000.0),
@@ -26,7 +26,7 @@ def test_generator():
     assert np.std(d) > 0
 
 
-def test_instance():
+def test_instance() -> None:
     n_cities = 4
     distances = np.array(
         [
@@ -40,6 +40,7 @@ def test_instance():
     solver = LearningSolver()
     stats = solver.solve(instance)
     solution = instance.training_data[0].solution
+    assert solution is not None
     assert solution["x[(0, 1)]"] == 1.0
     assert solution["x[(0, 2)]"] == 0.0
     assert solution["x[(0, 3)]"] == 1.0
@@ -50,7 +51,7 @@ def test_instance():
     assert stats["Upper bound"] == 4.0
 
 
-def test_subtour():
+def test_subtour() -> None:
     n_cities = 6
     cities = np.array(
         [
@@ -66,8 +67,10 @@ def test_subtour():
     instance = TravelingSalesmanInstance(n_cities, distances)
     solver = LearningSolver()
     solver.solve(instance)
+    assert instance.training_data[0].lazy_enforced is not None
     assert len(instance.training_data[0].lazy_enforced) > 0
     solution = instance.training_data[0].solution
+    assert solution is not None
     assert solution["x[(0, 1)]"] == 1.0
     assert solution["x[(0, 4)]"] == 1.0
     assert solution["x[(1, 2)]"] == 1.0
diff --git a/tests/solvers/__init__.py b/tests/solvers/__init__.py
index 34d546f..0305640 100644
--- a/tests/solvers/__init__.py
+++ b/tests/solvers/__init__.py
@@ -5,6 +5,7 @@
 from inspect import isclass
 from typing import List, Callable, Any
 
+from miplearn.instance.base import Instance
 from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
 from miplearn.solvers.gurobi import GurobiSolver
 from miplearn.solvers.internal import InternalSolver
@@ -19,7 +20,7 @@ def _is_subclass_or_instance(obj: Any, parent_class: Any) -> bool:
     )
 
 
-def _get_knapsack_instance(solver):
+def _get_knapsack_instance(solver: Any) -> Instance:
     if _is_subclass_or_instance(solver, BasePyomoSolver):
         return KnapsackInstance(
             weights=[23.0, 26.0, 20.0, 18.0],
diff --git a/tests/solvers/test_internal_solver.py b/tests/solvers/test_internal_solver.py
index 1e1cc0e..584a635 100644
--- a/tests/solvers/test_internal_solver.py
+++ b/tests/solvers/test_internal_solver.py
@@ -20,7 +20,7 @@ from ..fixtures.infeasible import get_infeasible_instance
 logger = logging.getLogger(__name__)
 
 
-def test_redirect_output():
+def test_redirect_output() -> None:
     import sys
 
     original_stdout = sys.stdout
@@ -31,7 +31,7 @@ def test_redirect_output():
     assert io.getvalue() == "Hello world\n"
 
 
-def test_internal_solver_warm_starts():
+def test_internal_solver_warm_starts() -> None:
     for solver in get_internal_solvers():
         logger.info("Solver: %s" % solver)
         instance = _get_knapsack_instance(solver)
@@ -54,7 +54,7 @@ def test_internal_solver_warm_starts():
         assert stats["Upper bound"] == 725.0
 
 
-def test_internal_solver():
+def test_internal_solver() -> None:
     for solver in get_internal_solvers():
         logger.info("Solver: %s" % solver)
 
@@ -64,26 +64,37 @@ def test_internal_solver():
 
         assert solver.get_variable_names() == ["x[0]", "x[1]", "x[2]", "x[3]"]
 
-        stats = solver.solve_lp()
+        lp_stats = solver.solve_lp()
         assert not solver.is_infeasible()
-        assert round(stats["LP value"], 3) == 1287.923
-        assert len(stats["LP log"]) > 100
+        assert lp_stats["LP value"] is not None
+        assert round(lp_stats["LP value"], 3) == 1287.923
+        assert len(lp_stats["LP log"]) > 100
 
         solution = solver.get_solution()
+        assert solution is not None
+        assert solution["x[0]"] is not None
+        assert solution["x[1]"] is not None
+        assert solution["x[2]"] is not None
+        assert solution["x[3]"] is not None
         assert round(solution["x[0]"], 3) == 1.000
         assert round(solution["x[1]"], 3) == 0.923
         assert round(solution["x[2]"], 3) == 1.000
         assert round(solution["x[3]"], 3) == 0.000
 
-        stats = solver.solve(tee=True)
+        mip_stats = solver.solve(tee=True)
         assert not solver.is_infeasible()
-        assert len(stats["MIP log"]) > 100
-        assert stats["Lower bound"] == 1183.0
-        assert stats["Upper bound"] == 1183.0
-        assert stats["Sense"] == "max"
-        assert isinstance(stats["Wallclock time"], float)
+        assert len(mip_stats["MIP log"]) > 100
+        assert mip_stats["Lower bound"] == 1183.0
+        assert mip_stats["Upper bound"] == 1183.0
+        assert mip_stats["Sense"] == "max"
+        assert isinstance(mip_stats["Wallclock time"], float)
 
         solution = solver.get_solution()
+        assert solution is not None
+        assert solution["x[0]"] is not None
+        assert solution["x[1]"] is not None
+        assert solution["x[2]"] is not None
+        assert solution["x[3]"] is not None
         assert solution["x[0]"] == 1.0
         assert solution["x[1]"] == 0.0
         assert solution["x[2]"] == 1.0
@@ -143,43 +154,45 @@ def test_internal_solver():
         solver.relax()
         solver.set_constraint_sense("cut", "=")
         stats = solver.solve()
+        assert stats["Lower bound"] is not None
         assert round(stats["Lower bound"]) == 1030.0
         assert round(solver.get_dual("eq_capacity")) == 0.0
 
 
-def test_relax():
+def test_relax() -> None:
     for solver in get_internal_solvers():
         instance = _get_knapsack_instance(solver)
         solver.set_instance(instance)
         solver.relax()
         stats = solver.solve()
+        assert stats["Lower bound"] is not None
         assert round(stats["Lower bound"]) == 1288.0
 
 
-def test_infeasible_instance():
+def test_infeasible_instance() -> None:
     for solver in get_internal_solvers():
         instance = get_infeasible_instance(solver)
         solver.set_instance(instance)
 
-        stats = solver.solve()
+        mip_stats = solver.solve()
         assert solver.is_infeasible()
         assert solver.get_solution() is None
-        assert stats["Upper bound"] is None
-        assert stats["Lower bound"] is None
+        assert mip_stats["Upper bound"] is None
+        assert mip_stats["Lower bound"] is None
 
-        stats = solver.solve_lp()
+        lp_stats = solver.solve_lp()
         assert solver.get_solution() is None
-        assert stats["LP value"] is None
+        assert lp_stats["LP value"] is None
 
 
-def test_iteration_cb():
+def test_iteration_cb() -> None:
     for solver in get_internal_solvers():
         logger.info("Solver: %s" % solver)
         instance = _get_knapsack_instance(solver)
         solver.set_instance(instance)
         count = 0
 
-        def custom_iteration_cb():
+        def custom_iteration_cb() -> bool:
             nonlocal count
             count += 1
             return count < 5
diff --git a/tests/solvers/test_lazy_cb.py b/tests/solvers/test_lazy_cb.py
index a21477c..fcf3e8f 100644
--- a/tests/solvers/test_lazy_cb.py
+++ b/tests/solvers/test_lazy_cb.py
@@ -3,19 +3,21 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 import logging
 
+from typing import Any
+from miplearn import InternalSolver
 from miplearn.solvers.gurobi import GurobiSolver
 
 from . import _get_knapsack_instance
 
 logger = logging.getLogger(__name__)
 
 
-def test_lazy_cb():
+def test_lazy_cb() -> None:
     solver = GurobiSolver()
     instance = _get_knapsack_instance(solver)
     model = instance.to_model()
 
-    def lazy_cb(cb_solver, cb_model):
+    def lazy_cb(cb_solver: InternalSolver, cb_model: Any) -> None:
         cobj = (cb_model.getVarByName("x[0]") * 1.0, "<", 0.0, "cut")
         if not cb_solver.is_constraint_satisfied(cobj):
             cb_solver.add_constraint(cobj)
diff --git a/tests/solvers/test_learning_solver.py b/tests/solvers/test_learning_solver.py
index efc460a..7a39a1b 100644
--- a/tests/solvers/test_learning_solver.py
+++ b/tests/solvers/test_learning_solver.py
@@ -16,7 +16,7 @@ from . import _get_knapsack_instance, get_internal_solvers
 logger = logging.getLogger(__name__)
 
 
-def test_learning_solver():
+def test_learning_solver() -> None:
     for mode in ["exact", "heuristic"]:
         for internal_solver in get_internal_solvers():
             logger.info("Solver: %s" % internal_solver)
@@ -30,17 +30,21 @@ def test_learning_solver():
             assert hasattr(instance, "features")
 
             sample = instance.training_data[0]
+            assert sample.solution is not None
             assert sample.solution["x[0]"] == 1.0
             assert sample.solution["x[1]"] == 0.0
             assert sample.solution["x[2]"] == 1.0
             assert sample.solution["x[3]"] == 1.0
             assert sample.lower_bound == 1183.0
             assert sample.upper_bound == 1183.0
+            assert sample.lp_solution is not None
             assert round(sample.lp_solution["x[0]"], 3) == 1.000
             assert round(sample.lp_solution["x[1]"], 3) == 0.923
             assert round(sample.lp_solution["x[2]"], 3) == 1.000
             assert round(sample.lp_solution["x[3]"], 3) == 0.000
+            assert sample.lp_value is not None
             assert round(sample.lp_value, 3) == 1287.923
+            assert sample.mip_log is not None
             assert len(sample.mip_log) > 100
 
             solver.fit([instance])
@@ -51,7 +55,7 @@ def test_learning_solver():
                 dill.dump(solver, file)
 
 
-def test_solve_without_lp():
+def test_solve_without_lp() -> None:
     for internal_solver in get_internal_solvers():
         logger.info("Solver: %s" % internal_solver)
         instance = _get_knapsack_instance(internal_solver)
@@ -64,7 +68,7 @@ def test_solve_without_lp():
         solver.solve(instance)
 
 
-def test_parallel_solve():
+def test_parallel_solve() -> None:
     for internal_solver in get_internal_solvers():
         instances = [_get_knapsack_instance(internal_solver) for _ in range(10)]
         solver = LearningSolver(solver=internal_solver)
@@ -72,10 +76,11 @@ def test_parallel_solve():
         assert len(results) == 10
         for instance in instances:
             data = instance.training_data[0]
+            assert data.solution is not None
             assert len(data.solution.keys()) == 4
 
 
-def test_solve_fit_from_disk():
+def test_solve_fit_from_disk() -> None:
     for internal_solver in get_internal_solvers():
         # Create instances and pickle them
         instances = []
@@ -108,7 +113,7 @@ def test_solve_fit_from_disk():
             os.remove(instance.filename)
 
 
-def test_simulate_perfect():
+def test_simulate_perfect() -> None:
     internal_solver = GurobiSolver()
     instance = _get_knapsack_instance(internal_solver)
     with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as tmp:
@@ -121,7 +126,7 @@ def test_simulate_perfect():
     assert stats["Lower bound"] == stats["Objective: Predicted lower bound"]
 
 
-def test_gap():
+def test_gap() -> None:
     assert LearningSolver._compute_gap(ub=0.0, lb=0.0) == 0.0
     assert LearningSolver._compute_gap(ub=1.0, lb=0.5) == 0.5
     assert LearningSolver._compute_gap(ub=1.0, lb=1.0) == 0.0
diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py
index 392465d..15ea3a6 100644
--- a/tests/test_benchmark.py
+++ b/tests/test_benchmark.py
@@ -11,7 +11,7 @@ from miplearn.problems.stab import MaxWeightStableSetGenerator
 from miplearn.solvers.learning import LearningSolver
 
 
-def test_benchmark():
+def test_benchmark() -> None:
     for n_jobs in [1, 4]:
         # Generate training and test instances
         generator = MaxWeightStableSetGenerator(n=randint(low=25, high=26))
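
Editorial aside (not part of the patch itself): the `.mypy.ini` change above enables `disallow_untyped_defs`, which makes mypy reject any function definition that lacks type annotations; that is why every test function in this patch gains an explicit `-> None` return type. A minimal sketch of the effect, using a hypothetical `example.py` that is not part of the repository:

```python
# Illustrative sketch only; this file is not touched by the patch.
# With "disallow_untyped_defs = True" in .mypy.ini, mypy reports
# "Function is missing a return type annotation" for test_untyped,
# while the fully annotated test_typed passes the check.
from typing import Dict


def test_untyped():  # flagged once disallow_untyped_defs is active
    assert 1 + 1 == 2


def test_typed(config: Dict[str, int]) -> None:  # accepted by mypy
    assert config.get("answer", 42) == 42
```

The `# type: ignore` comments sprinkled through the test files serve the same goal: assignments such as `comp.sample_xy = _sample_xy` deliberately overwrite a method on a real object for testing purposes, which mypy would otherwise reject, so they are silenced case by case.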