Add run_benchmarks method

master
Alinson S. Xavier 4 years ago
parent beb15f7667
commit 2fd04eb274

@@ -11,6 +11,10 @@ import pandas as pd
 from miplearn.components.component import Component
 from miplearn.instance.base import Instance
 from miplearn.solvers.learning import LearningSolver
+from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
+from sklearn.utils._testing import ignore_warnings
+from sklearn.exceptions import ConvergenceWarning

 logger = logging.getLogger(__name__)
@@ -20,20 +24,6 @@ class BenchmarkRunner:
     Utility class that simplifies the task of comparing the performance of different
     solvers.

-    Example
-    -------
-    ```python
-    benchmark = BenchmarkRunner({
-        "Baseline": LearningSolver(...),
-        "Strategy A": LearningSolver(...),
-        "Strategy B": LearningSolver(...),
-        "Strategy C": LearningSolver(...),
-    })
-    benchmark.fit(train_instances)
-    benchmark.parallel_solve(test_instances, n_jobs=5)
-    benchmark.save_results("result.csv")
-    ```
-
     Parameters
     ----------
     solvers: Dict[str, LearningSolver]
@@ -55,7 +45,8 @@ class BenchmarkRunner:
         self,
         instances: List[Instance],
         n_jobs: int = 1,
-        n_trials: int = 3,
+        n_trials: int = 1,
+        progress: bool = False,
     ) -> None:
         """
         Solves the given instances in parallel and collects benchmark statistics.
@@ -77,8 +68,9 @@ class BenchmarkRunner:
             results = solver.parallel_solve(
                 trials,
                 n_jobs=n_jobs,
-                label="Solve (%s)" % solver_name,
+                label="solve (%s)" % solver_name,
                 discard_outputs=True,
+                progress=progress,
             )
             for i in range(len(trials)):
                 idx = i % len(instances)
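With the new `progress` flag and the changed `n_trials` default, each instance is queued `n_trials` times per solver, and the `i % len(instances)` arithmetic maps every trial back to its instance. A minimal usage sketch, assuming `benchmark` is an already-configured `BenchmarkRunner` and `test_instances` is a list of `Instance` objects:

```python
# Sketch only: `benchmark` and `test_instances` are assumed to exist.
benchmark.parallel_solve(
    test_instances,
    n_jobs=4,
    n_trials=3,     # solve each instance three times per solver
    progress=True,  # new in this commit: display a progress bar
)
benchmark.save_results("results.csv")  # persist the collected statistics
```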
@@ -99,7 +91,12 @@ class BenchmarkRunner:
             os.makedirs(os.path.dirname(filename), exist_ok=True)
         self.results.to_csv(filename)

-    def fit(self, instances: List[Instance], n_jobs: int = 1) -> None:
+    def fit(
+        self,
+        instances: List[Instance],
+        n_jobs: int = 1,
+        progress: bool = True,
+    ) -> None:
         """
         Trains all solvers, except the one named "baseline", with the provided
         training instances.
@@ -111,14 +108,126 @@ class BenchmarkRunner:
             Number of parallel processes to use.
         """
         components: List[Component] = []
-        for solver in self.solvers.values():
+        for (solver_name, solver) in self.solvers.items():
+            if solver_name == "baseline":
+                continue
             components += solver.components.values()
         Component.fit_multiple(
             components,
             instances,
             n_jobs=n_jobs,
+            progress=progress,
         )
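With this change, a solver registered under the name `baseline` is skipped during training, so it always runs untrained. A sketch of the intended setup, assuming `train_instances` is a list of training `Instance` objects:

```python
# Sketch only: fit() now trains the components of "ml-exact" but skips
# the solver named "baseline".
benchmark = BenchmarkRunner(
    solvers={
        "baseline": LearningSolver(solver=GurobiPyomoSolver()),
        "ml-exact": LearningSolver(solver=GurobiPyomoSolver()),
    }
)
benchmark.fit(train_instances, n_jobs=4, progress=True)
```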
+
+    def plot_results(self) -> None:
+        import matplotlib.pyplot as plt
+        import seaborn as sns
+
+        sns.set_style("whitegrid")
+        sns.set_palette("Blues_r")
+        groups = self.results.groupby("Instance")
+        best_lower_bound = groups["mip_lower_bound"].transform("max")
+        best_upper_bound = groups["mip_upper_bound"].transform("min")
+        self.results["Relative lower bound"] = (
+            self.results["mip_lower_bound"] / best_lower_bound
+        )
+        self.results["Relative upper bound"] = (
+            self.results["mip_upper_bound"] / best_upper_bound
+        )
+        # self.results concatenates rows from multiple solvers, so compare
+        # the whole column rather than indexing a single (repeated) row.
+        if (self.results["mip_sense"] == "min").any():
+            primal_column = "Relative upper bound"
+            obj_column = "mip_upper_bound"
+            predicted_obj_column = "Objective: Predicted upper bound"
+        else:
+            primal_column = "Relative lower bound"
+            obj_column = "mip_lower_bound"
+            predicted_obj_column = "Objective: Predicted lower bound"
+        palette = {
+            "baseline": "#9b59b6",
+            "ml-exact": "#3498db",
+            "ml-heuristic": "#95a5a6",
+        }
+        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(
+            nrows=2,
+            ncols=2,
+            figsize=(8, 8),
+        )
+
+        # Wallclock time
+        sns.stripplot(
+            x="Solver",
+            y="mip_wallclock_time",
+            data=self.results,
+            ax=ax1,
+            jitter=0.25,
+            palette=palette,
+            size=2.0,
+        )
+        sns.barplot(
+            x="Solver",
+            y="mip_wallclock_time",
+            data=self.results,
+            ax=ax1,
+            errwidth=0.0,
+            alpha=0.4,
+            palette=palette,
+        )
+        ax1.set(ylabel="Wallclock time (s)")
+
+        # Gap
+        sns.stripplot(
+            x="Solver",
+            y="Gap",
+            jitter=0.25,
+            data=self.results[self.results["Solver"] != "ml-heuristic"],
+            ax=ax2,
+            palette=palette,
+            size=2.0,
+        )
+        ax2.set(ylabel="Relative MIP gap")
+
+        # Relative primal bound (ml-heuristic only)
+        sns.stripplot(
+            x="Solver",
+            y=primal_column,
+            jitter=0.25,
+            data=self.results[self.results["Solver"] == "ml-heuristic"],
+            ax=ax3,
+            palette=palette,
+            size=2.0,
+        )
+
+        # Predicted vs actual primal bound (ml-exact only)
+        sns.scatterplot(
+            x=obj_column,
+            y=predicted_obj_column,
+            hue="Solver",
+            data=self.results[self.results["Solver"] == "ml-exact"],
+            ax=ax4,
+            palette=palette,
+            size=2.0,
+        )
+        # Diagonal reference line: points on it are perfect predictions.
+        xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
+        ax4.plot(
+            [-1e10, 1e10],
+            [-1e10, 1e10],
+            ls="-",
+            color="#cccccc",
+        )
+        ax4.set_xlim(xlim)
+        ax4.set_ylim(ylim)
+        ax4.get_legend().remove()
+        ax4.set(
+            ylabel="Predicted value",
+            xlabel="Actual value",
+        )
+        fig.tight_layout()
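Note that `plot_results` assembles the 2x2 figure but does not show or save it, leaving that to the caller. A sketch using the standard matplotlib API:

```python
import matplotlib.pyplot as plt

benchmark.plot_results()               # builds the 2x2 summary figure
plt.savefig("benchmark.png", dpi=150)  # save the current figure to disk
plt.show()                             # or display it interactively
```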
     def _silence_miplearn_logger(self) -> None:
         miplearn_logger = logging.getLogger("miplearn")
         self.prev_log_level = miplearn_logger.getEffectiveLevel()
@@ -127,3 +236,44 @@ class BenchmarkRunner:
     def _restore_miplearn_logger(self) -> None:
         miplearn_logger = logging.getLogger("miplearn")
         miplearn_logger.setLevel(self.prev_log_level)
+
+
+@ignore_warnings(category=ConvergenceWarning)
+def run_benchmarks(
+    train_instances: List[Instance],
+    test_instances: List[Instance],
+    n_jobs: int = 4,
+    n_trials: int = 1,
+    progress: bool = False,
+) -> None:
+    benchmark = BenchmarkRunner(
+        solvers={
+            "baseline": LearningSolver(
+                solver=GurobiPyomoSolver(),
+            ),
+            "ml-exact": LearningSolver(
+                solver=GurobiPyomoSolver(),
+            ),
+            "ml-heuristic": LearningSolver(
+                solver=GurobiPyomoSolver(),
+                mode="heuristic",
+            ),
+        }
+    )
+    # Solve the training instances with the baseline solver to collect
+    # training data, then train the ML components of the other solvers.
+    benchmark.solvers["baseline"].parallel_solve(
+        train_instances,
+        n_jobs=n_jobs,
+        progress=progress,
+    )
+    benchmark.fit(
+        train_instances,
+        n_jobs=n_jobs,
+        progress=progress,
+    )
+    benchmark.parallel_solve(
+        test_instances,
+        n_jobs=n_jobs,
+        n_trials=n_trials,
+        progress=progress,
+    )
+    benchmark.plot_results()
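`run_benchmarks` therefore wraps the full loop: solve the training set with the baseline solver, train the ML components, benchmark the test set, and plot. A usage sketch, where `build_instances` is a hypothetical helper that produces `Instance` objects:

```python
# Hypothetical: build_instances stands in for any code that generates
# miplearn Instance objects (e.g. a random problem generator).
train_instances = build_instances(n=100)
test_instances = build_instances(n=25)

run_benchmarks(
    train_instances,
    test_instances,
    n_jobs=4,
    n_trials=3,
    progress=True,
)
```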
