Mirror of https://github.com/ANL-CEEESA/MIPLearn.git
Benchmark: Add extra columns to CSV
@@ -77,56 +77,37 @@ class BenchmarkRunner:
def _push_result(self, result, solver, solver_name, instance):
if self.results is None:
self.results = pd.DataFrame(
# Show the following columns first in the CSV file
columns=[
"Solver",
"Instance",
"Wallclock Time",
"Lower Bound",
"Upper Bound",
"Gap",
"Nodes",
"Mode",
"Sense",
"Predicted LB",
"Predicted UB",
]
)

lb = result["Lower bound"]
ub = result["Upper bound"]
gap = (ub - lb) / lb
if "Predicted LB" not in result:
result["Predicted LB"] = float("nan")
result["Predicted UB"] = float("nan")
self.results = self.results.append(
{
"Solver": solver_name,
"Instance": instance,
"Wallclock Time": result["Wallclock time"],
"Lower Bound": lb,
"Upper Bound": ub,
"Gap": gap,
"Nodes": result["Nodes"],
"Mode": solver.mode,
"Sense": result["Sense"],
"Predicted LB": result["Predicted LB"],
"Predicted UB": result["Predicted UB"],
},
ignore_index=True,
)
result["Solver"] = solver_name
result["Instance"] = instance
result["Gap"] = (ub - lb) / lb
result["Mode"] = solver.mode
del result["Log"]
self.results = self.results.append(pd.DataFrame([result]))

# Compute relative statistics
groups = self.results.groupby("Instance")
best_lower_bound = groups["Lower Bound"].transform("max")
best_upper_bound = groups["Upper Bound"].transform("min")
best_lower_bound = groups["Lower bound"].transform("max")
best_upper_bound = groups["Upper bound"].transform("min")
best_gap = groups["Gap"].transform("min")
best_nodes = np.maximum(1, groups["Nodes"].transform("min"))
best_wallclock_time = groups["Wallclock Time"].transform("min")
self.results["Relative Lower Bound"] = (
self.results["Lower Bound"] / best_lower_bound
best_wallclock_time = groups["Wallclock time"].transform("min")
self.results["Relative lower bound"] = (
self.results["Lower bound"] / best_lower_bound
)
self.results["Relative Upper Bound"] = (
self.results["Upper Bound"] / best_upper_bound
self.results["Relative upper bound"] = (
self.results["Upper bound"] / best_upper_bound
)
self.results["Relative Wallclock Time"] = (
self.results["Wallclock Time"] / best_wallclock_time
self.results["Relative wallclock time"] = (
self.results["Wallclock time"] / best_wallclock_time
)
self.results["Relative Gap"] = self.results["Gap"] / best_gap
self.results["Relative Nodes"] = self.results["Nodes"] / best_nodes
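The relative statistics above rely on pandas groupby/transform, which broadcasts each instance's best value back onto every row for that instance. A minimal, self-contained sketch of that pattern (the toy data and column values below are illustrative, not taken from the library):

import numpy as np
import pandas as pd

results = pd.DataFrame({
    "Instance": ["a", "a", "b", "b"],
    "Solver":   ["baseline", "ml-exact", "baseline", "ml-exact"],
    "Wallclock time": [10.0, 4.0, 20.0, 25.0],
    "Nodes": [120, 30, 0, 15],
})
groups = results.groupby("Instance")
best_time = groups["Wallclock time"].transform("min")          # per-instance minimum, broadcast to rows
best_nodes = np.maximum(1, groups["Nodes"].transform("min"))   # guard against division by zero
results["Relative wallclock time"] = results["Wallclock time"] / best_time
results["Relative Nodes"] = results["Nodes"] / best_nodes
print(results)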
@@ -143,12 +124,12 @@ class BenchmarkRunner:
sense = results.loc[0, "Sense"]
if sense == "min":
primal_column = "Relative Upper Bound"
obj_column = "Upper Bound"
primal_column = "Relative upper bound"
obj_column = "Upper bound"
predicted_obj_column = "Predicted UB"
else:
primal_column = "Relative Lower Bound"
obj_column = "Lower Bound"
primal_column = "Relative lower bound"
obj_column = "Lower bound"
predicted_obj_column = "Predicted LB"

fig, (ax1, ax2, ax3, ax4) = plt.subplots(
@@ -158,10 +139,10 @@ class BenchmarkRunner:
gridspec_kw={"width_ratios": [2, 1, 1, 2]},
)

# Figure 1: Solver x Wallclock Time
# Figure 1: Solver x Wallclock time
sns.stripplot(
x="Solver",
y="Wallclock Time",
y="Wallclock time",
data=results,
ax=ax1,
jitter=0.25,
@@ -169,14 +150,14 @@ class BenchmarkRunner:
)
sns.barplot(
x="Solver",
y="Wallclock Time",
y="Wallclock time",
data=results,
ax=ax1,
errwidth=0.0,
alpha=0.4,
estimator=median,
)
ax1.set(ylabel="Wallclock Time (s)")
ax1.set(ylabel="Wallclock time (s)")

# Figure 2: Solver x Gap (%)
ax2.set_ylim(-0.5, 5.5)
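The plotting code above overlays individual runs (a stripplot) on semi-transparent median bars (a barplot) sharing one axis. A minimal sketch of that overlay, assuming only a results DataFrame with "Solver" and "Wallclock time" columns (the toy data and output path are placeholders, not part of the commit):

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import median

results = pd.DataFrame({
    "Solver": ["baseline", "baseline", "ml-exact", "ml-exact"],
    "Wallclock time": [10.2, 11.8, 6.4, 7.1],
})
fig, ax = plt.subplots(figsize=(4, 4))
# One dot per (solver, instance) run, jittered horizontally to avoid overlap.
sns.stripplot(x="Solver", y="Wallclock time", data=results, ax=ax, jitter=0.25)
# Median bar per solver, drawn behind the dots on the same axis.
sns.barplot(x="Solver", y="Wallclock time", data=results, ax=ax, alpha=0.4, estimator=median)
ax.set(ylabel="Wallclock time (s)")
plt.savefig("benchmark.png")  # hypothetical output path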
@@ -51,6 +51,11 @@ class ConvertTightIneqsIntoEqsStep(Component):
return_constraints=True,
)
y = self.predict(x)

self.total_converted = 0
self.total_restored = 0
self.total_kept = 0
self.total_iterations = 0
for category in y.keys():
for i in range(len(y[category])):
if y[category][i][0] == 1:
@@ -59,10 +64,17 @@ class ConvertTightIneqsIntoEqsStep(Component):
self.original_sense[cid] = s
solver.internal_solver.set_constraint_sense(cid, "=")
self.converted += [cid]
logger.info(f"Converted {len(self.converted)} inequalities")
self.total_converted += 1
else:
self.total_kept += 1
logger.info(f"Converted {self.total_converted} inequalities")

def after_solve(self, solver, instance, model, results):
instance.slacks = solver.internal_solver.get_inequality_slacks()
results["ConvertTight: Kept"] = self.total_kept
results["ConvertTight: Converted"] = self.total_converted
results["ConvertTight: Restored"] = self.total_restored
results["ConvertTight: Iterations"] = self.total_iterations

def fit(self, training_instances):
logger.debug("Extracting x and y...")
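The pattern in the two hunks above is what ultimately adds the extra CSV columns: the step accumulates counters while it works, then publishes them into the shared results dictionary in after_solve. A simplified, self-contained sketch of that pattern (the class, method names, and prediction format here are illustrative stand-ins, not the actual component):

class CountingStep:
    def __init__(self):
        self.total_converted = 0
        self.total_kept = 0

    def before_solve(self, predictions):
        # "predictions" is a hypothetical list of 0/1 labels, one per constraint.
        for keep in predictions:
            if keep == 1:
                self.total_converted += 1
            else:
                self.total_kept += 1

    def after_solve(self, results):
        # Writing into the shared results dict is what turns counters into CSV columns.
        results["ConvertTight: Converted"] = self.total_converted
        results["ConvertTight: Kept"] = self.total_kept

step = CountingStep()
step.before_solve([1, 0, 1, 1])
results = {}
step.after_solve(results)
print(results)  # {'ConvertTight: Converted': 3, 'ConvertTight: Kept': 1}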
@@ -173,7 +185,9 @@ class ConvertTightIneqsIntoEqsStep(Component):
for cid in restored:
self.converted.remove(cid)
if len(restored) > 0:
self.total_restored += len(restored)
logger.info(f"Restored {len(restored)} inequalities")
self.total_iterations += 1
return True
else:
return False
@@ -57,6 +57,11 @@ class DropRedundantInequalitiesStep(Component):
return_constraints=True,
)
y = self.predict(x)

self.total_dropped = 0
self.total_restored = 0
self.total_kept = 0
self.total_iterations = 0
for category in y.keys():
for i in range(len(y[category])):
if y[category][i][0] == 1:
@@ -66,10 +71,17 @@ class DropRedundantInequalitiesStep(Component):
obj=solver.internal_solver.extract_constraint(cid),
)
self.pool += [c]
logger.info("Extracted %d predicted constraints" % len(self.pool))
self.total_dropped += 1
else:
self.total_kept += 1
logger.info(f"Extracted {self.total_dropped} predicted constraints")

def after_solve(self, solver, instance, model, results):
instance.slacks = solver.internal_solver.get_inequality_slacks()
results["DropRedundant: Kept"] = self.total_kept
results["DropRedundant: Dropped"] = self.total_dropped
results["DropRedundant: Restored"] = self.total_restored
results["DropRedundant: Iterations"] = self.total_iterations

def fit(self, training_instances):
logger.debug("Extracting x and y...")
@@ -180,10 +192,12 @@ class DropRedundantInequalitiesStep(Component):
self.pool.remove(c)
solver.internal_solver.add_constraint(c.obj)
if len(constraints_to_add) > 0:
self.total_restored += len(constraints_to_add)
logger.info(
"%8d constraints %8d in the pool"
% (len(constraints_to_add), len(self.pool))
)
self.total_iterations += 1
return True
else:
return False
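Both components follow the same drop-and-restore loop: constraints predicted redundant (or convertible) are set aside before solving, and after each solve any violated ones are put back and the callback returns True to request another solve. A rough, self-contained sketch of that loop (the FakeSolver, callback signature, and constraint names below are invented for illustration, not the library API):

class FakeSolver:
    def __init__(self):
        self.active = []
    def add_constraint(self, c):
        self.active.append(c)

def iteration_cb(solver, pool, is_violated, totals):
    violated = [c for c in pool if is_violated(c)]
    for c in violated:
        pool.remove(c)
        solver.add_constraint(c)   # restore the constraint before the next solve
    if violated:
        totals["restored"] += len(violated)
        totals["iterations"] += 1
        return True   # re-solve with the restored constraints
    return False      # no pool constraint is violated; stop iterating

solver = FakeSolver()
pool = ["c1", "c2", "c3"]
totals = {"restored": 0, "iterations": 0}
print(iteration_cb(solver, pool, lambda c: c == "c2", totals))  # True: c2 was violated and restored
print(iteration_cb(solver, pool, lambda c: False, totals))      # False: nothing left to restore
print(totals, solver.active)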
@@ -115,7 +115,7 @@ def test_drop_redundant():
)

# LearningSolver calls after_solve
component.after_solve(solver, instance, None, None)
component.after_solve(solver, instance, None, {})

# Should query slack for all inequalities
internal.get_inequality_slacks.assert_called_once()
@@ -27,11 +27,11 @@ def test_benchmark():
benchmark = BenchmarkRunner(test_solvers)
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
assert benchmark.raw_results().values.shape == (12, 16)
assert benchmark.raw_results().values.shape == (12, 18)

benchmark.save_results("/tmp/benchmark.csv")
assert os.path.isfile("/tmp/benchmark.csv")

benchmark = BenchmarkRunner(test_solvers)
benchmark.load_results("/tmp/benchmark.csv")
assert benchmark.raw_results().values.shape == (12, 16)
assert benchmark.raw_results().values.shape == (12, 18)
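The test checks that the results table keeps its shape (now 18 columns) through a save/load round trip. A minimal pandas-only sketch of that round trip (BenchmarkRunner internals are not shown; the tiny DataFrame below is a placeholder):

import pandas as pd

results = pd.DataFrame({
    "Solver": ["baseline", "ml-exact"],
    "Instance": ["a", "a"],
    "Wallclock time": [10.0, 4.0],
})
results.to_csv("/tmp/benchmark.csv", index=False)   # save_results writes a CSV like this
loaded = pd.read_csv("/tmp/benchmark.csv")          # load_results reads it back
assert loaded.shape == results.shape                # same rows and columns after the round trip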