From 872ef0eb064af1864c9fc4d5332057cf8e4adca6 Mon Sep 17 00:00:00 2001
From: Alinson S Xavier
Date: Tue, 19 Jan 2021 09:46:05 -0600
Subject: [PATCH] Benchmark: Move relative statistics to benchmark script

---
 benchmark/benchmark.py           | 24 +++++++++++++++---------
 miplearn/benchmark.py            | 19 -------------------
 miplearn/tests/test_benchmark.py |  4 ++--
 3 files changed, 17 insertions(+), 30 deletions(-)

diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index afe43d4..0f02d0f 100755
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -31,6 +31,9 @@ import glob
 from docopt import docopt
 from numpy import median
 from pathlib import Path
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
 
 from miplearn import (
     LearningSolver,
@@ -132,16 +135,19 @@ def test_ml():
 
 
 def charts():
-    import matplotlib.pyplot as plt
-    import seaborn as sns
-
     sns.set_style("whitegrid")
     sns.set_palette("Blues_r")
-    benchmark = BenchmarkRunner({})
-    benchmark.load_results(f"{basepath}/benchmark_baseline.csv")
-    benchmark.load_results(f"{basepath}/benchmark_ml.csv")
-    results = benchmark.raw_results()
-    results["Gap (%)"] = results["Gap"] * 100.0
+
+    csv_files = [
+        f"{basepath}/benchmark_baseline.csv",
+        f"{basepath}/benchmark_ml.csv",
+    ]
+    results = pd.concat(map(pd.read_csv, csv_files))
+    groups = results.groupby("Instance")
+    best_lower_bound = groups["Lower bound"].transform("max")
+    best_upper_bound = groups["Upper bound"].transform("min")
+    results["Relative lower bound"] = results["Lower bound"] / best_lower_bound
+    results["Relative upper bound"] = results["Upper bound"] / best_upper_bound
 
     sense = results.loc[0, "Sense"]
     if (sense == "min").any():
@@ -187,7 +193,7 @@ def charts():
         ax2.set_ylim(-0.5, 5.5)
         sns.stripplot(
             x="Solver",
-            y="Gap (%)",
+            y="Gap",
             jitter=0.25,
             data=results[results["Solver"] != "ml-heuristic"],
             ax=ax2,
diff --git a/miplearn/benchmark.py b/miplearn/benchmark.py
index f3b3dee..d11c354 100644
--- a/miplearn/benchmark.py
+++ b/miplearn/benchmark.py
@@ -94,25 +94,6 @@ class BenchmarkRunner:
         result["Mode"] = solver.mode
         self.results = self.results.append(pd.DataFrame([result]))
 
-        # Compute relative statistics
-        groups = self.results.groupby("Instance")
-        best_lower_bound = groups["Lower bound"].transform("max")
-        best_upper_bound = groups["Upper bound"].transform("min")
-        best_gap = groups["Gap"].transform("min")
-        best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
-        best_wallclock_time = groups["Wallclock time"].transform("min")
-        self.results["Relative lower bound"] = (
-            self.results["Lower bound"] / best_lower_bound
-        )
-        self.results["Relative upper bound"] = (
-            self.results["Upper bound"] / best_upper_bound
-        )
-        self.results["Relative wallclock time"] = (
-            self.results["Wallclock time"] / best_wallclock_time
-        )
-        self.results["Relative Gap"] = self.results["Gap"] / best_gap
-        self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
-
     def _silence_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         self.prev_log_level = miplearn_logger.getEffectiveLevel()
diff --git a/miplearn/tests/test_benchmark.py b/miplearn/tests/test_benchmark.py
index 64e2a26..d7bb0cf 100644
--- a/miplearn/tests/test_benchmark.py
+++ b/miplearn/tests/test_benchmark.py
@@ -27,11 +27,11 @@ def test_benchmark():
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.fit(train_instances)
     benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.raw_results().values.shape == (12, 19)
+    assert benchmark.raw_results().values.shape == (12, 14)
     benchmark.save_results("/tmp/benchmark.csv")
     assert os.path.isfile("/tmp/benchmark.csv")
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (12, 19)
+    assert benchmark.raw_results().values.shape == (12, 14)
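
Note for reviewers: raw_results() now has five fewer columns, which is why the
expected shape in the test drops from (12, 19) to (12, 14). The five removed
columns are the relative statistics ("Relative lower bound", "Relative upper
bound", "Relative wallclock time", "Relative Gap", "Relative Nodes") that
BenchmarkRunner previously appended after every solve; the first two are now
recomputed on the fly by charts(). The sketch below is not part of the patch;
it is a self-contained illustration of the groupby/transform pattern the patch
relies on, run on invented sample rows. Only the column names "Instance",
"Lower bound", and "Upper bound" come from the patch; the solver labels and
bound values are made up.

    # Self-contained sketch (not part of the patch): relative bounds via
    # pandas groupby/transform, on invented sample data.
    import pandas as pd

    results = pd.DataFrame({
        "Solver": ["baseline", "ml-exact", "baseline", "ml-exact"],
        "Instance": ["i1", "i1", "i2", "i2"],
        "Lower bound": [98.0, 100.0, 47.0, 50.0],
        "Upper bound": [105.0, 102.0, 55.0, 52.0],
    })

    # transform() broadcasts each group's aggregate back onto every row of
    # that group, so each solver's bound is divided by the best bound found
    # by any solver on the same instance.
    groups = results.groupby("Instance")
    results["Relative lower bound"] = (
        results["Lower bound"] / groups["Lower bound"].transform("max")
    )
    results["Relative upper bound"] = (
        results["Upper bound"] / groups["Upper bound"].transform("min")
    )

    print(results)
    # The best solver on each instance gets a relative bound of exactly 1.0;
    # the other rows measure how far each solver is from that best value.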