Mirror of https://github.com/ANL-CEEESA/MIPLearn.git

Add customizable branch priority; add more metrics to BenchmarkRunner

@@ -18,15 +18,21 @@ class BenchmarkRunner:
         solver.load(filename)
         solver.fit()
 
-    def parallel_solve(self, instances, n_jobs=1):
+    def parallel_solve(self, instances, n_jobs=1, n_trials=1):
         if self.results is None:
             self.results = pd.DataFrame(columns=["Solver",
                                                  "Instance",
                                                  "Wallclock Time",
-                                                 "Obj Value",
+                                                 "Lower Bound",
+                                                 "Upper Bound",
+                                                 "Gap",
+                                                 "Nodes",
                                                 ])
+        instances = instances * n_trials
         for (name, solver) in self.solvers.items():
-            results = solver.parallel_solve(instances, n_jobs=n_jobs, label=name)
+            results = solver.parallel_solve(instances,
+                                            n_jobs=n_jobs,
+                                            label=name)
             for i in range(len(instances)):
                 wallclock_time = None
                 for key in ["Time", "Wall time", "Wallclock time"]:
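
The n_trials mechanism above is plain list repetition: instances * n_trials makes each instance appear n_trials times in the work queue, so every solver re-solves every instance that many times and the results table gains one row per repetition. A minimal sketch of the idea (names are illustrative, not from the commit):

    # Repeating a Python list concatenates it with itself, so each
    # instance is handed to every solver n_trials times.
    instances = ["knapsack-a", "knapsack-b", "knapsack-c"]
    n_trials = 2
    work_queue = instances * n_trials
    assert len(work_queue) == 6  # 3 instances x 2 trials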

@@ -35,19 +41,35 @@ class BenchmarkRunner:
                     if str(results[i]["Solver"][0][key]) == "<undefined>":
                         continue
                     wallclock_time = float(results[i]["Solver"][0][key])
+                nodes = results[i]["Solver"][0]["Nodes"]
+                lb = results[i]["Problem"][0]["Lower bound"]
+                ub = results[i]["Problem"][0]["Upper bound"]
+                gap = (ub - lb) / lb
                 self.results = self.results.append({
                     "Solver": name,
                     "Instance": i,
                     "Wallclock Time": wallclock_time,
-                    "Obj Value": results[i]["Problem"][0]["Lower bound"]
+                    "Lower Bound": lb,
+                    "Upper Bound": ub,
+                    "Gap": gap,
+                    "Nodes": nodes,
                 }, ignore_index=True)
         groups = self.results.groupby("Instance")
-        best_obj_value = groups["Obj Value"].transform("max")
+        best_lower_bound = groups["Lower Bound"].transform("max")
+        best_upper_bound = groups["Upper Bound"].transform("min")
+        best_gap = groups["Gap"].transform("min")
+        best_nodes = groups["Nodes"].transform("min")
         best_wallclock_time = groups["Wallclock Time"].transform("min")
-        self.results["Relative Obj Value"] = \
-            self.results["Obj Value"] / best_obj_value
+        self.results["Relative Lower Bound"] = \
+            self.results["Lower Bound"] / best_lower_bound
+        self.results["Relative Upper Bound"] = \
+            self.results["Upper Bound"] / best_upper_bound
         self.results["Relative Wallclock Time"] = \
             self.results["Wallclock Time"] / best_wallclock_time
+        self.results["Relative Gap"] = \
+            self.results["Gap"] / best_gap
+        self.results["Relative Nodes"] = \
+            self.results["Nodes"] / best_nodes
 
     def raw_results(self):
         return self.results
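
For context, the relative metrics computed above are groupby-transform arithmetic: within each instance group, every row is divided by the best value any solver achieved on that instance (max for lower bounds, min for time, gap, and nodes), so the per-instance winner always scores 1.0. A self-contained sketch with made-up numbers, not from the commit:

    import pandas as pd

    # Two solvers on the same instance; hypothetical timings.
    df = pd.DataFrame({"Solver": ["baseline", "ml"],
                       "Instance": [0, 0],
                       "Wallclock Time": [10.0, 4.0]})
    best = df.groupby("Instance")["Wallclock Time"].transform("min")
    df["Relative Wallclock Time"] = df["Wallclock Time"] / best
    # baseline -> 2.5, ml -> 1.0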

@@ -3,19 +3,21 @@
 # Written by Alinson S. Xavier <axavier@anl.gov>
 
 from .transformers import PerVariableTransformer
-from .warmstart import KnnWarmStartPredictor
+from .warmstart import KnnWarmStartPredictor, LogisticWarmStartPredictor
 import pyomo.environ as pe
 import numpy as np
 from copy import copy, deepcopy
 import pickle
 from tqdm import tqdm
 from joblib import Parallel, delayed
+from scipy.stats import randint
 import multiprocessing
 
 
 def _gurobi_factory():
     solver = pe.SolverFactory('gurobi_persistent')
     solver.options["threads"] = 4
+    solver.options["Seed"] = randint(low=0, high=1000).rvs()
     return solver
 
 class LearningSolver:
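
A note on the new seed line: scipy.stats.randint(low, high) is a discrete-uniform distribution on [low, high), and .rvs() draws a single sample, so each call to _gurobi_factory configures its solver with a fresh random seed. A minimal sketch, assuming only scipy:

    from scipy.stats import randint

    # One integer drawn uniformly from [0, 1000); a new draw per call.
    seed = randint(low=0, high=1000).rvs()
    assert 0 <= seed < 1000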

@@ -27,7 +29,8 @@ class LearningSolver:
     def __init__(self,
                  threads=4,
                  internal_solver_factory=_gurobi_factory,
-                 ws_predictor=KnnWarmStartPredictor(),
+                 ws_predictor=LogisticWarmStartPredictor(),
+                 branch_priority=None,
                  mode="exact"):
         self.internal_solver_factory = internal_solver_factory
         self.internal_solver = self.internal_solver_factory()

@@ -36,10 +39,14 @@ class LearningSolver:
         self.y_train = {}
         self.ws_predictors = {}
         self.ws_predictor_prototype = ws_predictor
+        self.branch_priority = branch_priority
 
     def solve(self, instance, tee=False):
-        # Convert instance into concrete model
+        # Load model into solver
         model = instance.to_model()
+        is_solver_persistent = hasattr(self.internal_solver, "set_instance")
+        if is_solver_persistent:
+            self.internal_solver.set_instance(model)
 
         # Split decision variables according to their category
         transformer = PerVariableTransformer()
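
The hasattr check above is duck-typing: Pyomo's persistent solver interfaces (such as gurobi_persistent) expose a set_instance() method, while one-shot interfaces do not, so the presence of that method serves as the persistence flag. A hedged sketch of the same test (glpk chosen only as an example of a non-persistent solver):

    import pyomo.environ as pe

    # Persistent interfaces keep the model loaded inside the solver
    # and expose set_instance(); one-shot interfaces solve and discard.
    solver = pe.SolverFactory("glpk")
    is_persistent = hasattr(solver, "set_instance")  # False for glpk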

@@ -56,10 +63,11 @@ class LearningSolver:
         else:
             self.x_train[category] = np.vstack([self.x_train[category], x])
 
-        # Predict warm start
         for category in var_split.keys():
-            if category in self.ws_predictors.keys():
             var_index_pairs = var_split[category]
+
+            # Predict warm starts
+            if category in self.ws_predictors.keys():
                 ws = self.ws_predictors[category].predict(x_test[category])
                 assert ws.shape == (len(var_index_pairs), 2)
                 for i in range(len(var_index_pairs)):

@@ -75,8 +83,22 @@ class LearningSolver:
                 elif ws[i,1] == 1:
                     var[index].value = 1
 
-        # Solve MILP
-        solve_results = self._solve(model, tee=tee)
+            # Set custom branch priority
+            if self.branch_priority is not None:
+                assert is_solver_persistent
+                from gurobipy import GRB
+                for (i, (var, index)) in enumerate(var_index_pairs):
+                    gvar = self.internal_solver._pyomo_var_to_solver_var_map[var[index]]
+                    #priority = randint(low=0, high=1000).rvs()
+                    gvar.setAttr(GRB.Attr.BranchPriority, self.branch_priority[index])
+
+        if is_solver_persistent:
+            solve_results = self.internal_solver.solve(tee=tee, warmstart=True)
+        else:
+            solve_results = self.internal_solver.solve(model, tee=tee, warmstart=True)
+
+        solve_results["Solver"][0]["Nodes"] = self.internal_solver._solver_model.getAttr("NodeCount")
+
 
         # Update y_train
         for category in var_split.keys():
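
The branch-priority block reaches through the persistent interface's _pyomo_var_to_solver_var_map to the raw gurobipy variables, because BranchPriority is a Gurobi-specific variable attribute (higher values are branched on earlier). A hedged sketch of the same attribute in plain gurobipy, reusing the knapsack data from the test below and assuming a working Gurobi license:

    import gurobipy as gp
    from gurobipy import GRB

    weights = [23.0, 26.0, 20.0, 18.0]
    prices = [505.0, 352.0, 458.0, 220.0]
    m = gp.Model()
    x = m.addVars(4, vtype=GRB.BINARY, name="x")
    m.addConstr(gp.quicksum(weights[i] * x[i] for i in range(4)) <= 67.0)
    m.setObjective(gp.quicksum(prices[i] * x[i] for i in range(4)), GRB.MAXIMIZE)
    # Higher BranchPriority means Gurobi prefers branching on that
    # variable; equivalent to gvar.setAttr(GRB.Attr.BranchPriority, v).
    for i, priority in enumerate([1, 2, 3, 4]):
        x[i].BranchPriority = priority
    m.optimize()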

@@ -113,7 +135,7 @@ class LearningSolver:
 
         results = Parallel(n_jobs=n_jobs)(
             delayed(_process)(instance)
-            for instance in tqdm(instances, desc=label)
+            for instance in tqdm(instances, desc=label, ncols=80)
         )
 
         x_train, y_train, results = _merge(results)

@@ -148,10 +170,3 @@ class LearningSolver:
         self.x_train = data["x_train"]
         self.y_train = data["y_train"]
         self.ws_predictors = self.ws_predictors
-
-    def _solve(self, model, tee=False):
-        if hasattr(self.internal_solver, "set_instance"):
-            self.internal_solver.set_instance(model)
-            return self.internal_solver.solve(tee=tee, warmstart=True)
-        else:
-            return self.internal_solver.solve(model, tee=tee, warmstart=True)

@@ -28,12 +28,12 @@ def test_benchmark():
     }
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_fit("data.bin")
-    benchmark.parallel_solve(test_instances, n_jobs=2)
-    assert benchmark.raw_results().values.shape == (6,6)
+    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
+    assert benchmark.raw_results().values.shape == (12,12)
 
     benchmark.save_results("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")
 
     benchmark = BenchmarkRunner(test_solvers)
     benchmark.load_results("/tmp/benchmark.csv")
-    assert benchmark.raw_results().values.shape == (6,6)
+    assert benchmark.raw_results().values.shape == (12,12)
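
Unpacking the updated assertion: rows double from 6 to 12 because n_trials=2 repeats every (solver, instance) pair, and columns grow from 6 to 12 because the seven raw columns (Solver, Instance, Wallclock Time, Lower Bound, Upper Bound, Gap, Nodes) are now joined by the five relative columns added above.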

@@ -41,3 +41,10 @@ def test_parallel_solve():
     solver.parallel_solve(instances, n_jobs=3)
     assert len(solver.x_train[0]) == 10
     assert len(solver.y_train[0]) == 10
+
+def test_solver_random_branch_priority():
+    instance = KnapsackInstance2(weights=[23., 26., 20., 18.],
+                                 prices=[505., 352., 458., 220.],
+                                 capacity=67.)
+    solver = LearningSolver(branch_priority=[1, 2, 3, 4])
+    solver.solve(instance, tee=True)