From 5eee00c626d56d65668da31a6aa1cf9f4e10768d Mon Sep 17 00:00:00 2001
From: Alinson S Xavier
Date: Wed, 26 Feb 2020 15:45:02 -0600
Subject: [PATCH] Plot predicted objective value

---
 benchmark/benchmark.py           | 46 +++++++++++++++++++++++---------
 miplearn/benchmark.py            |  4 +++
 miplearn/components/component.py |  2 +-
 miplearn/components/lazy.py      |  2 +-
 miplearn/components/objective.py |  9 +++++--
 miplearn/components/primal.py    |  2 +-
 miplearn/solvers.py              |  2 +-
 miplearn/tests/test_benchmark.py |  4 +--
 8 files changed, 50 insertions(+), 21 deletions(-)

diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index 31a5f72..ff98c23 100755
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -118,52 +118,72 @@ def charts():
     sense = results.loc[0, "Sense"]
     if sense == "min":
         primal_column = "Relative Upper Bound"
+        obj_column = "Upper Bound"
+        predicted_obj_column = "Predicted UB"
     else:
         primal_column = "Relative Lower Bound"
+        obj_column = "Lower Bound"
+        predicted_obj_column = "Predicted LB"
 
     palette={
         "baseline": "#9b59b6",
         "ml-exact": "#3498db",
         "ml-heuristic": "#95a5a6"
     }
-    fig, axes = plt.subplots(nrows=1,
-                             ncols=3,
-                             figsize=(10,4),
-                             gridspec_kw={'width_ratios': [3, 3, 2]},
-                            )
+    fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,
+                                             ncols=4,
+                                             figsize=(12,4),
+                                             gridspec_kw={'width_ratios': [2, 1, 1, 2]},
+                                            )
     sns.stripplot(x="Solver",
                   y="Wallclock Time",
                   data=results,
-                  ax=axes[0],
+                  ax=ax1,
                   jitter=0.25,
                   palette=palette,
+                  size=4.0,
                  );
     sns.barplot(x="Solver",
                 y="Wallclock Time",
                 data=results,
-                ax=axes[0],
+                ax=ax1,
                 errwidth=0.,
-                alpha=0.3,
+                alpha=0.4,
                 palette=palette,
                 estimator=median,
               );
-    axes[0].set(ylabel='Wallclock Time (s)')
-    axes[1].set_ylim(-0.5, 5.5)
+    ax1.set(ylabel='Wallclock Time (s)')
+    ax2.set_ylim(-0.5, 5.5)
     sns.stripplot(x="Solver",
                   y="Gap (%)",
                   jitter=0.25,
                   data=results[results["Solver"] != "ml-heuristic"],
-                  ax=axes[1],
+                  ax=ax2,
                   palette=palette,
+                  size=4.0,
                  );
-    axes[2].set_ylim(0.95,1.01)
+    ax3.set_ylim(0.95,1.05)
     sns.stripplot(x="Solver",
                   y=primal_column,
                   jitter=0.25,
                   data=results[results["Solver"] == "ml-heuristic"],
-                  ax=axes[2],
+                  ax=ax3,
                   palette=palette,
                  );
+
+    sns.scatterplot(x=obj_column,
+                    y=predicted_obj_column,
+                    hue="Solver",
+                    data=results[results["Solver"] == "ml-exact"],
+                    ax=ax4,
+                    palette=palette,
+                   );
+    xlim, ylim = ax4.get_xlim(), ax4.get_ylim()
+    ax4.plot([-1e10, 1e10], [-1e10, 1e10], ls='-', color="#cccccc");
+    ax4.set_xlim(xlim)
+    ax4.set_ylim(ylim)
+    ax4.get_legend().remove()
+
     fig.tight_layout()
     plt.savefig("%s/performance.png" % basepath,
                 bbox_inches='tight',
diff --git a/miplearn/benchmark.py b/miplearn/benchmark.py
index c46c009..b71c808 100644
--- a/miplearn/benchmark.py
+++ b/miplearn/benchmark.py
@@ -64,6 +64,8 @@ class BenchmarkRunner:
             "Nodes",
             "Mode",
             "Sense",
+            "Predicted LB",
+            "Predicted UB",
         ])
         lb = result["Lower bound"]
         ub = result["Upper bound"]
@@ -78,6 +80,8 @@ class BenchmarkRunner:
             "Nodes": result["Nodes"],
             "Mode": solver.mode,
             "Sense": result["Sense"],
+            "Predicted LB": result["Predicted LB"],
+            "Predicted UB": result["Predicted UB"],
         }, ignore_index=True)
         groups = self.results.groupby("Instance")
         best_lower_bound = groups["Lower Bound"].transform("max")
diff --git a/miplearn/components/component.py b/miplearn/components/component.py
index fba3bf1..3135307 100644
--- a/miplearn/components/component.py
+++ b/miplearn/components/component.py
@@ -15,7 +15,7 @@ class Component(ABC):
         pass
 
     @abstractmethod
-    def after_solve(self, solver, instance, model):
+    def after_solve(self, solver, instance, model, results):
         pass
 
     @abstractmethod
diff --git a/miplearn/components/lazy.py b/miplearn/components/lazy.py
index 8200085..036b396 100644
--- a/miplearn/components/lazy.py
+++ b/miplearn/components/lazy.py
@@ -38,7 +38,7 @@ class LazyConstraintsComponent(Component):
             cut = instance.build_lazy_constraint(model, v)
             solver.internal_solver.add_constraint(cut)
 
-    def after_solve(self, solver, instance, model):
+    def after_solve(self, solver, instance, model, results):
         pass
 
     def fit(self, training_instances):
diff --git a/miplearn/components/objective.py b/miplearn/components/objective.py
index cf6d20e..39394f9 100644
--- a/miplearn/components/objective.py
+++ b/miplearn/components/objective.py
@@ -27,8 +27,13 @@ class ObjectiveValueComponent(Component):
         instance.predicted_lb = lb
         logger.info("Predicted objective: [%.2f, %.2f]" % (lb, ub))
 
-    def after_solve(self, solver, instance, model):
-        pass
+    def after_solve(self, solver, instance, model, results):
+        if self.ub_regressor is not None:
+            results["Predicted UB"] = instance.predicted_ub
+            results["Predicted LB"] = instance.predicted_lb
+        else:
+            results["Predicted UB"] = None
+            results["Predicted LB"] = None
 
     def fit(self, training_instances):
         logger.debug("Extracting features...")
diff --git a/miplearn/components/primal.py b/miplearn/components/primal.py
index 3b21075..bc3b9f4 100644
--- a/miplearn/components/primal.py
+++ b/miplearn/components/primal.py
@@ -135,7 +135,7 @@ class PrimalSolutionComponent(Component):
         else:
             solver.internal_solver.set_warm_start(solution)
 
-    def after_solve(self, solver, instance, model):
+    def after_solve(self, solver, instance, model, results):
         pass
 
     def fit(self, training_instances):
diff --git a/miplearn/solvers.py b/miplearn/solvers.py
index 7449129..381fc12 100644
--- a/miplearn/solvers.py
+++ b/miplearn/solvers.py
@@ -306,7 +306,7 @@ class LearningSolver:
 
         logger.debug("Calling after_solve callbacks...")
         for component in self.components.values():
-            component.after_solve(self, instance, model)
+            component.after_solve(self, instance, model, results)
 
         # Store instance for future training
         self.training_instances += [instance]
diff --git a/miplearn/tests/test_benchmark.py b/miplearn/tests/test_benchmark.py
index 523ba61..5aa0441 100644
--- a/miplearn/tests/test_benchmark.py
+++ b/miplearn/tests/test_benchmark.py
@@ -27,11 +27,11 @@ def test_benchmark():
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.fit(train_instances)
     benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
-    assert benchmark.raw_results().values.shape == (12,13)
+    assert benchmark.raw_results().values.shape == (12,16)
 
     benchmark.save_results("/tmp/benchmark.csv")
     assert os.path.isfile("/tmp/benchmark.csv")
 
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (12,13)
+    assert benchmark.raw_results().values.shape == (12,16)