From beb15f766721db3e4821143c4d2ae02da6a19da4 Mon Sep 17 00:00:00 2001
From: Alinson S Xavier
Date: Fri, 10 Sep 2021 16:35:17 -0500
Subject: [PATCH] Remove obsolete benchmark files

---
 Makefile                      |   1 -
 benchmark/Makefile            |  31 ----
 benchmark/__init__.py         |   0
 benchmark/benchmark.py        | 268 ----------------------------------
 miplearn/problems/knapsack.py |  45 ------
 miplearn/problems/stab.py     |  47 +-----
 miplearn/problems/tsp.py      |  24 ---
 7 files changed, 1 insertion(+), 415 deletions(-)
 delete mode 100644 benchmark/Makefile
 delete mode 100644 benchmark/__init__.py
 delete mode 100755 benchmark/benchmark.py

diff --git a/Makefile b/Makefile
index 9a6c54a..9808817 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,6 @@ test:
 	rm -rf .mypy_cache
 	$(MYPY) -p miplearn
 	$(MYPY) -p tests
-	$(MYPY) -p benchmark
 	$(PYTEST) $(PYTEST_ARGS)
 
 .PHONY: test test-watch docs install dist
diff --git a/benchmark/Makefile b/benchmark/Makefile
deleted file mode 100644
index c56081a..0000000
--- a/benchmark/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
-# Written by Alinson S. Xavier
-
-CHALLENGES := \
-	stab/ChallengeA \
-	knapsack/ChallengeA \
-	tsp/ChallengeA
-
-test: $(addsuffix /performance.png, $(CHALLENGES))
-
-train: $(addsuffix /train/done, $(CHALLENGES))
-
-%/train/done:
-	python benchmark.py train $*
-
-%/benchmark_baseline.csv: %/train/done
-	python benchmark.py test-baseline $*
-
-%/benchmark_ml.csv: %/benchmark_baseline.csv
-	python benchmark.py test-ml $*
-
-%/performance.png: %/benchmark_ml.csv
-	python benchmark.py charts $*
-
-clean:
-	rm -rvf $(CHALLENGES)
-
-.PHONY: clean
-.SECONDARY:
diff --git a/benchmark/__init__.py b/benchmark/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
deleted file mode 100755
index 47c2a14..0000000
--- a/benchmark/benchmark.py
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/usr/bin/env python
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
- -"""MIPLearn Benchmark Scripts - -Usage: - benchmark.py train [options] - benchmark.py test-baseline [options] - benchmark.py test-ml [options] - benchmark.py charts - -Options: - -h --help Show this screen - --train-jobs= Number of instances to solve in parallel during training [default: 10] - --train-time-limit= Solver time limit during training in seconds [default: 900] - --test-jobs= Number of instances to solve in parallel during test [default: 5] - --test-time-limit= Solver time limit during test in seconds [default: 900] - --solver-threads= Number of threads the solver is allowed to use [default: 4] -""" -import glob -import importlib -import logging -import os -from pathlib import Path -from typing import Dict, List - -import matplotlib.pyplot as plt -import pandas as pd -import seaborn as sns -from docopt import docopt -from numpy import median - -from miplearn import ( - LearningSolver, - BenchmarkRunner, - GurobiPyomoSolver, - setup_logger, - PickleGzInstance, - write_pickle_gz_multiple, - Instance, -) - -setup_logger() -logging.getLogger("gurobipy").setLevel(logging.ERROR) -logging.getLogger("pyomo.core").setLevel(logging.ERROR) -logger = logging.getLogger("benchmark") - - -def train(args: Dict) -> None: - basepath = args[""] - problem_name, challenge_name = args[""].split("/") - pkg = importlib.import_module(f"miplearn.problems.{problem_name}") - challenge = getattr(pkg, challenge_name)() - - if not os.path.isdir(f"{basepath}/train"): - write_pickle_gz_multiple(challenge.training_instances, f"{basepath}/train") - write_pickle_gz_multiple(challenge.test_instances, f"{basepath}/test") - - done_filename = f"{basepath}/train/done" - if not os.path.isfile(done_filename): - train_instances: List[Instance] = [ - PickleGzInstance(f) for f in glob.glob(f"{basepath}/train/*.gz") - ] - solver = LearningSolver( - solver=GurobiPyomoSolver( - params={ - "TimeLimit": int(args["--train-time-limit"]), - "Threads": int(args["--solver-threads"]), - } - ), - ) - solver.parallel_solve( - train_instances, - n_jobs=int(args["--train-jobs"]), - ) - Path(done_filename).touch(exist_ok=True) - - -def test_baseline(args: Dict) -> None: - basepath = args[""] - test_instances: List[Instance] = [ - PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz") - ] - csv_filename = f"{basepath}/benchmark_baseline.csv" - if not os.path.isfile(csv_filename): - solvers = { - "baseline": LearningSolver( - solver=GurobiPyomoSolver( - params={ - "TimeLimit": int(args["--test-time-limit"]), - "Threads": int(args["--solver-threads"]), - } - ), - ), - } - benchmark = BenchmarkRunner(solvers) - benchmark.parallel_solve( - test_instances, - n_jobs=int(args["--test-jobs"]), - ) - benchmark.write_csv(csv_filename) - - -def test_ml(args: Dict) -> None: - basepath = args[""] - test_instances: List[Instance] = [ - PickleGzInstance(f) for f in glob.glob(f"{basepath}/test/*.gz") - ] - train_instances: List[Instance] = [ - PickleGzInstance(f) for f in glob.glob(f"{basepath}/train/*.gz") - ] - csv_filename = f"{basepath}/benchmark_ml.csv" - if not os.path.isfile(csv_filename): - solvers = { - "ml-exact": LearningSolver( - solver=GurobiPyomoSolver( - params={ - "TimeLimit": int(args["--test-time-limit"]), - "Threads": int(args["--solver-threads"]), - } - ), - ), - "ml-heuristic": LearningSolver( - solver=GurobiPyomoSolver( - params={ - "TimeLimit": int(args["--test-time-limit"]), - "Threads": int(args["--solver-threads"]), - } - ), - mode="heuristic", - ), - } - benchmark = BenchmarkRunner(solvers) - 
-        benchmark.fit(train_instances)
-        benchmark.parallel_solve(
-            test_instances,
-            n_jobs=int(args["--test-jobs"]),
-        )
-        benchmark.write_csv(csv_filename)
-
-
-def charts(args: Dict) -> None:
-    basepath = args["<challenge>"]
-    sns.set_style("whitegrid")
-    sns.set_palette("Blues_r")
-
-    csv_files = [
-        f"{basepath}/benchmark_baseline.csv",
-        f"{basepath}/benchmark_ml.csv",
-    ]
-    results = pd.concat(map(pd.read_csv, csv_files))
-    groups = results.groupby("Instance")
-    best_lower_bound = groups["Lower bound"].transform("max")
-    best_upper_bound = groups["Upper bound"].transform("min")
-    results["Relative lower bound"] = results["Lower bound"] / best_lower_bound
-    results["Relative upper bound"] = results["Upper bound"] / best_upper_bound
-
-    sense = results.loc[0, "Sense"]
-    if (sense == "min").any():
-        primal_column = "Relative upper bound"
-        obj_column = "Upper bound"
-        predicted_obj_column = "Objective: Predicted upper bound"
-    else:
-        primal_column = "Relative lower bound"
-        obj_column = "Lower bound"
-        predicted_obj_column = "Objective: Predicted lower bound"
-
-    palette = {"baseline": "#9b59b6", "ml-exact": "#3498db", "ml-heuristic": "#95a5a6"}
-    fig, (ax1, ax2, ax3, ax4) = plt.subplots(
-        nrows=1,
-        ncols=4,
-        figsize=(12, 4),
-        gridspec_kw={"width_ratios": [2, 1, 1, 2]},
-    )
-
-    # Wallclock time
-    sns.stripplot(
-        x="Solver",
-        y="Wallclock time",
-        data=results,
-        ax=ax1,
-        jitter=0.25,
-        palette=palette,
-        size=4.0,
-    )
-    sns.barplot(
-        x="Solver",
-        y="Wallclock time",
-        data=results,
-        ax=ax1,
-        errwidth=0.0,
-        alpha=0.4,
-        palette=palette,
-        estimator=median,
-    )
-    ax1.set(ylabel="Wallclock time (s)")
-
-    # Gap
-    ax2.set_ylim(-0.5, 5.5)
-    sns.stripplot(
-        x="Solver",
-        y="Gap",
-        jitter=0.25,
-        data=results[results["Solver"] != "ml-heuristic"],
-        ax=ax2,
-        palette=palette,
-        size=4.0,
-    )
-
-    # Relative primal bound
-    ax3.set_ylim(0.95, 1.05)
-    sns.stripplot(
-        x="Solver",
-        y=primal_column,
-        jitter=0.25,
-        data=results[results["Solver"] == "ml-heuristic"],
-        ax=ax3,
-        palette=palette,
-    )
-    sns.scatterplot(
-        x=obj_column,
-        y=predicted_obj_column,
-        hue="Solver",
-        data=results[results["Solver"] == "ml-exact"],
-        ax=ax4,
-        palette=palette,
-    )
-
-    # Predicted vs actual primal bound
-    xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
-    ax4.plot(
-        [-1e10, 1e10],
-        [-1e10, 1e10],
-        ls="-",
-        color="#cccccc",
-    )
-    ax4.set_xlim(xlim)
-    ax4.set_ylim(ylim)
-    ax4.get_legend().remove()
-    ax4.set(
-        ylabel="Predicted value",
-        xlabel="Actual value",
-    )
-
-    fig.tight_layout()
-    plt.savefig(
-        f"{basepath}/performance.png",
-        bbox_inches="tight",
-        dpi=150,
-    )
-
-
-def main() -> None:
-    args = docopt(__doc__)
-    if args["train"]:
-        train(args)
-    if args["test-baseline"]:
-        test_baseline(args)
-    if args["test-ml"]:
-        test_ml(args)
-    if args["charts"]:
-        charts(args)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/miplearn/problems/knapsack.py b/miplearn/problems/knapsack.py
index 1dd06ef..67c17a6 100644
--- a/miplearn/problems/knapsack.py
+++ b/miplearn/problems/knapsack.py
@@ -13,38 +13,6 @@ from scipy.stats.distributions import rv_frozen
 from miplearn.instance.base import Instance
 
 
-class ChallengeA:
-    """
-    - 250 variables, 10 constraints, fixed weights
-    - w ~ U(0, 1000), jitter ~ U(0.95, 1.05)
-    - K = 500, u ~ U(0., 1.)
-    - alpha = 0.25
-    """
-
-    def __init__(
-        self,
-        seed: int = 42,
-        n_training_instances: int = 500,
-        n_test_instances: int = 50,
-    ) -> None:
-        np.random.seed(seed)
-        self.gen = MultiKnapsackGenerator(
-            n=randint(low=250, high=251),
-            m=randint(low=10, high=11),
-            w=uniform(loc=0.0, scale=1000.0),
-            K=uniform(loc=500.0, scale=0.0),
-            u=uniform(loc=0.0, scale=1.0),
-            alpha=uniform(loc=0.25, scale=0.0),
-            fix_w=True,
-            w_jitter=uniform(loc=0.95, scale=0.1),
-        )
-        np.random.seed(seed + 1)
-        self.training_instances = self.gen.generate(n_training_instances)
-
-        np.random.seed(seed + 2)
-        self.test_instances = self.gen.generate(n_test_instances)
-
-
 class MultiKnapsackInstance(Instance):
     """Representation of the Multidimensional 0-1 Knapsack Problem.
 
@@ -93,19 +61,6 @@ class MultiKnapsackInstance(Instance):
         return model
 
-    @overrides
-    def get_instance_features(self) -> np.ndarray:
-        return np.array([float(np.mean(self.prices))] + list(self.capacities))
-
-    @overrides
-    def get_variable_features(self, names: np.ndarray) -> np.ndarray:
-        features = []
-        for i in range(len(self.weights)):
-            f = [self.prices[i]]
-            f.extend(self.weights[:, i])
-            features.append(f)
-        return np.array(features)
-
 
 # noinspection PyPep8Naming
 class MultiKnapsackGenerator:
diff --git a/miplearn/problems/stab.py b/miplearn/problems/stab.py
index a64fb3c..e638b88 100644
--- a/miplearn/problems/stab.py
+++ b/miplearn/problems/stab.py
@@ -1,6 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+
 from typing import List, Dict
 
 import networkx as nx
@@ -14,28 +15,6 @@ from scipy.stats.distributions import rv_frozen
 from miplearn.instance.base import Instance
 
 
-class ChallengeA:
-    def __init__(
-        self,
-        seed: int = 42,
-        n_training_instances: int = 500,
-        n_test_instances: int = 50,
-    ) -> None:
-        np.random.seed(seed)
-        self.generator = MaxWeightStableSetGenerator(
-            w=uniform(loc=100.0, scale=50.0),
-            n=randint(low=200, high=201),
-            p=uniform(loc=0.05, scale=0.0),
-            fix_graph=True,
-        )
-
-        np.random.seed(seed + 1)
-        self.training_instances = self.generator.generate(n_training_instances)
-
-        np.random.seed(seed + 2)
-        self.test_instances = self.generator.generate(n_test_instances)
-
-
 class MaxWeightStableSetInstance(Instance):
     """An instance of the Maximum-Weight Stable Set Problem.
 
@@ -65,30 +44,6 @@ class MaxWeightStableSetInstance(Instance):
             model.clique_eqs.add(sum(model.x[v] for v in clique) <= 1)
         return model
 
-    @overrides
-    def get_variable_features(self, names: np.ndarray) -> np.ndarray:
-        features = []
-        assert len(names) == len(self.nodes)
-        for i, v1 in enumerate(self.nodes):
-            assert names[i] == f"x[{v1}]".encode()
-            neighbor_weights = [0.0] * 15
-            neighbor_degrees = [100.0] * 15
-            for v2 in self.graph.neighbors(v1):
-                neighbor_weights += [self.weights[v2] / self.weights[v1]]
-                neighbor_degrees += [self.graph.degree(v2) / self.graph.degree(v1)]
-            neighbor_weights.sort(reverse=True)
-            neighbor_degrees.sort()
-            f = []
-            f += neighbor_weights[:5]
-            f += neighbor_degrees[:5]
-            f += [self.graph.degree(v1)]
-            features.append(f)
-        return np.array(features)
-
-    @overrides
-    def get_variable_categories(self, names: np.ndarray) -> np.ndarray:
-        return np.array(["default" for _ in names], dtype="S")
-
 
 class MaxWeightStableSetGenerator:
     """Random instance generator for the Maximum-Weight Stable Set Problem.
diff --git a/miplearn/problems/tsp.py b/miplearn/problems/tsp.py
index b277e3a..b8e2cd7 100644
--- a/miplearn/problems/tsp.py
+++ b/miplearn/problems/tsp.py
@@ -17,30 +17,6 @@ from miplearn.solvers.pyomo.base import BasePyomoSolver
 from miplearn.types import ConstraintName
 
 
-class ChallengeA:
-    def __init__(
-        self,
-        seed: int = 42,
-        n_training_instances: int = 500,
-        n_test_instances: int = 50,
-    ) -> None:
-        np.random.seed(seed)
-        self.generator = TravelingSalesmanGenerator(
-            x=uniform(loc=0.0, scale=1000.0),
-            y=uniform(loc=0.0, scale=1000.0),
-            n=randint(low=350, high=351),
-            gamma=uniform(loc=0.95, scale=0.1),
-            fix_cities=True,
-            round=True,
-        )
-
-        np.random.seed(seed + 1)
-        self.training_instances = self.generator.generate(n_training_instances)
-
-        np.random.seed(seed + 2)
-        self.test_instances = self.generator.generate(n_test_instances)
-
-
 class TravelingSalesmanInstance(Instance):
     """An instance ot the Traveling Salesman Problem.