Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 09:28:51 -06:00)
Benchmark: Move relative statistics to benchmark script
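In outline: BenchmarkRunner previously appended five per-instance "Relative ..." columns to its results table; this commit deletes that block and has the benchmark script's charts() helper derive the two relative bounds it actually plots. A minimal sketch of the groupby/transform pattern the script now uses (column names are taken from the diff below; the CSV paths are illustrative):

    import pandas as pd

    # Combine the per-solver result files written by BenchmarkRunner.save_results.
    csv_files = ["benchmark_baseline.csv", "benchmark_ml.csv"]  # illustrative paths
    results = pd.concat(map(pd.read_csv, csv_files))

    # Normalize each solver's bounds against the best bound any solver
    # achieved on the same instance.
    groups = results.groupby("Instance")
    results["Relative lower bound"] = (
        results["Lower bound"] / groups["Lower bound"].transform("max")
    )
    results["Relative upper bound"] = (
        results["Upper bound"] / groups["Upper bound"].transform("min")
    )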
@@ -31,6 +31,9 @@ import glob
 from docopt import docopt
 from numpy import median
 from pathlib import Path
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
 
 from miplearn import (
     LearningSolver,
@@ -132,16 +135,19 @@ def test_ml():
 
 
 def charts():
-    import matplotlib.pyplot as plt
-    import seaborn as sns
 
     sns.set_style("whitegrid")
     sns.set_palette("Blues_r")
-    benchmark = BenchmarkRunner({})
-    benchmark.load_results(f"{basepath}/benchmark_baseline.csv")
-    benchmark.load_results(f"{basepath}/benchmark_ml.csv")
-    results = benchmark.raw_results()
-    results["Gap (%)"] = results["Gap"] * 100.0
+    csv_files = [
+        f"{basepath}/benchmark_baseline.csv",
+        f"{basepath}/benchmark_ml.csv",
+    ]
+    results = pd.concat(map(pd.read_csv, csv_files))
+    groups = results.groupby("Instance")
+    best_lower_bound = groups["Lower bound"].transform("max")
+    best_upper_bound = groups["Upper bound"].transform("min")
+    results["Relative lower bound"] = results["Lower bound"] / best_lower_bound
+    results["Relative upper bound"] = results["Upper bound"] / best_upper_bound
 
     sense = results.loc[0, "Sense"]
     if (sense == "min").any():
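A side note on the sense check above: after pd.concat the default integer index repeats, so results.loc[0, "Sense"] returns one value per input file, which is presumably why the comparison ends in .any(). A small illustration (values are hypothetical):

    # .loc[0] picks up index label 0 from every concatenated CSV:
    sense = results.loc[0, "Sense"]  # e.g. a Series like ["min", "min"]
    if (sense == "min").any():
        pass  # treat the benchmark as a minimization problem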
@@ -187,7 +193,7 @@ def charts():
     ax2.set_ylim(-0.5, 5.5)
     sns.stripplot(
         x="Solver",
-        y="Gap (%)",
+        y="Gap",
         jitter=0.25,
         data=results[results["Solver"] != "ml-heuristic"],
         ax=ax2,
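Because charts() no longer creates the derived "Gap (%)" column, the strip plot switches to the raw "Gap" fraction. If percentage units are still wanted on this axis, a hypothetical variant converts at plot time instead of mutating the shared frame:

    plot_data = results[results["Solver"] != "ml-heuristic"].copy()
    plot_data["Gap (%)"] = plot_data["Gap"] * 100.0  # same conversion the old code used
    sns.stripplot(x="Solver", y="Gap (%)", jitter=0.25, data=plot_data, ax=ax2)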
@@ -94,25 +94,6 @@ class BenchmarkRunner:
             result["Mode"] = solver.mode
             self.results = self.results.append(pd.DataFrame([result]))
 
-        # Compute relative statistics
-        groups = self.results.groupby("Instance")
-        best_lower_bound = groups["Lower bound"].transform("max")
-        best_upper_bound = groups["Upper bound"].transform("min")
-        best_gap = groups["Gap"].transform("min")
-        best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
-        best_wallclock_time = groups["Wallclock time"].transform("min")
-        self.results["Relative lower bound"] = (
-            self.results["Lower bound"] / best_lower_bound
-        )
-        self.results["Relative upper bound"] = (
-            self.results["Upper bound"] / best_upper_bound
-        )
-        self.results["Relative wallclock time"] = (
-            self.results["Wallclock time"] / best_wallclock_time
-        )
-        self.results["Relative Gap"] = self.results["Gap"] / best_gap
-        self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
-
     def _silence_miplearn_logger(self):
         miplearn_logger = logging.getLogger("miplearn")
         self.prev_log_level = miplearn_logger.getEffectiveLevel()
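The deleted block also carried a division-by-zero guard for node counts: instances solved at the root node report zero nodes, so the best node count was clamped with np.maximum(1, ...). Should the script ever need relative node counts or times again, the same pattern translates directly (a sketch, reusing the groups object from charts()):

    import numpy as np

    # Clamp to 1 so instances solved at the root do not divide by zero.
    best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
    results["Relative Nodes"] = results["Nodes"] / best_nodes
    results["Relative wallclock time"] = (
        results["Wallclock time"] / groups["Wallclock time"].transform("min")
    )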
@@ -27,11 +27,11 @@ def test_benchmark():
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.fit(train_instances)
     benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.raw_results().values.shape == (12, 19)
+    assert benchmark.raw_results().values.shape == (12, 14)
 
     benchmark.save_results("/tmp/benchmark.csv")
     assert os.path.isfile("/tmp/benchmark.csv")
 
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (12, 19)
+    assert benchmark.raw_results().values.shape == (12, 14)
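The updated shape assertions follow directly from the removal above: the runner no longer appends the five "Relative ..." columns, so the raw results table shrinks from 19 columns to 14.

    # Sanity check for the expected column count (names from the deleted block):
    relative_columns = [
        "Relative lower bound",
        "Relative upper bound",
        "Relative wallclock time",
        "Relative Gap",
        "Relative Nodes",
    ]
    assert 19 - len(relative_columns) == 14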