Update benchmark scripts

pull/1/head
Alinson S. Xavier 6 years ago
parent 212e38d004
commit 24b0a7888e

Makefile
@@ -9,7 +9,7 @@ CHALLENGES := \
 main: $(addsuffix /performance.png, $(CHALLENGES))
-%/training_data.bin:
+%/train_instances.bin:
 	python benchmark.py train $*
 %/benchmark_baseline.csv: %/training_data.bin
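
The renamed target keeps the Makefile in step with the script below: train() now pickles train_instances.bin rather than calling save_state on training_data.bin. For any challenge directory matched by %, the pattern rule expands as in this sketch (the directory name is hypothetical, not part of the commit):

knapsack/ChallengeA/train_instances.bin:
	python benchmark.py train knapsack/ChallengeA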

benchmark.py
@@ -28,6 +28,9 @@ import pickle
 import logging
 logging.getLogger('pyomo.core').setLevel(logging.ERROR)
+n_jobs = 10
+time_limit = 300
 args = docopt(__doc__)
 basepath = args["<challenge>"]
 pathlib.Path(basepath).mkdir(parents=True, exist_ok=True)
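
For context, args comes from docopt, which parses the module docstring into a dictionary of commands and positionals. A minimal sketch of the pattern, assuming a usage string along these lines (the real one sits at the top of benchmark.py and may differ):

"""Usage: benchmark.py (train | test-baseline | test-ml | charts) <challenge>"""
from docopt import docopt

args = docopt(__doc__)
# Invoked as "python benchmark.py train knapsack/ChallengeA" (hypothetical
# challenge name), this yields e.g.:
# {"train": True, "test-baseline": False, "test-ml": False, "charts": False,
#  "<challenge>": "knapsack/ChallengeA"}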
@@ -45,36 +48,15 @@ def load(filename):
         return pickle.load(file)
 
-def train_solver_factory():
-    solver = pe.SolverFactory('gurobi_persistent')
-    solver.options["threads"] = 4
-    solver.options["TimeLimit"] = 300
-    return solver
-
-def test_solver_factory():
-    solver = pe.SolverFactory('gurobi_persistent')
-    solver.options["threads"] = 4
-    solver.options["TimeLimit"] = 300
-    return solver
-
 def train():
     problem_name, challenge_name = args["<challenge>"].split("/")
     pkg = importlib.import_module("miplearn.problems.%s" % problem_name)
     challenge = getattr(pkg, challenge_name)()
-    train_instances = challenge.training_instances
-    test_instances = challenge.test_instances
-    solver = LearningSolver(
-        internal_solver_factory=train_solver_factory,
-        components={
-            "warm-start": WarmStartComponent(),
-            #"branch-priority": BranchPriorityComponent(),
-        },
-    )
-    solver.parallel_solve(train_instances, n_jobs=10)
-    solver.fit(n_jobs=10)
-    solver.save_state("%s/training_data.bin" % basepath)
+    train_instances = challenge.training_instances[:10]
+    test_instances = challenge.test_instances[:10]
+    solver = LearningSolver(time_limit=time_limit, components={})
+    solver.parallel_solve(train_instances, n_jobs=n_jobs)
+    solver.fit(n_jobs=n_jobs)
     save(train_instances, "%s/train_instances.bin" % basepath)
     save(test_instances, "%s/test_instances.bin" % basepath)
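
The save()/load() helpers are only partially visible in this hunk. A plausible reconstruction of the pair, assuming a plain pickle round-trip (the body of load() matches the context line above; save() is inferred from its call sites):

import pickle

def save(obj, filename):
    # Serialize any picklable object (here: lists of problem instances).
    with open(filename, "wb") as file:
        pickle.dump(obj, file)

def load(filename):
    # Inverse of save(); returns the deserialized object.
    with open(filename, "rb") as file:
        return pickle.load(file)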
@@ -82,39 +64,31 @@ def train():
 def test_baseline():
     solvers = {
         "baseline": LearningSolver(
-            internal_solver_factory=test_solver_factory,
+            time_limit=time_limit,
             components={},
         ),
     }
     test_instances = load("%s/test_instances.bin" % basepath)
     benchmark = BenchmarkRunner(solvers)
-    benchmark.parallel_solve(test_instances, n_jobs=10)
+    benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
     benchmark.save_results("%s/benchmark_baseline.csv" % basepath)
 
 def test_ml():
     solvers = {
         "ml-exact": LearningSolver(
-            internal_solver_factory=test_solver_factory,
-            components={
-                "warm-start": WarmStartComponent(),
-                #"branch-priority": BranchPriorityComponent(),
-            },
+            time_limit=time_limit,
         ),
         "ml-heuristic": LearningSolver(
-            internal_solver_factory=test_solver_factory,
+            time_limit=time_limit,
             mode="heuristic",
-            components={
-                "warm-start": WarmStartComponent(),
-                #"branch-priority": BranchPriorityComponent(),
-            },
         ),
     }
     test_instances = load("%s/test_instances.bin" % basepath)
     benchmark = BenchmarkRunner(solvers)
     benchmark.load_state("%s/training_data.bin" % basepath)
     benchmark.load_results("%s/benchmark_baseline.csv" % basepath)
-    benchmark.parallel_solve(test_instances, n_jobs=10)
+    benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
     benchmark.save_results("%s/benchmark_ml.csv" % basepath)
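
Note the ordering in test_ml(): load_state restores the trained component state, and load_results presumably pre-seeds the runner with the baseline rows so that benchmark_ml.csv ends up holding all three solvers side by side. A condensed sketch of the same flow, using only calls that appear above (the combined dictionary and output filename are illustrative, not part of this commit):

solvers = {
    "baseline":     LearningSolver(time_limit=time_limit, components={}),
    "ml-exact":     LearningSolver(time_limit=time_limit),
    "ml-heuristic": LearningSolver(time_limit=time_limit, mode="heuristic"),
}
benchmark = BenchmarkRunner(solvers)
benchmark.load_state("%s/training_data.bin" % basepath)   # trained ML state
benchmark.parallel_solve(load("%s/test_instances.bin" % basepath), n_jobs=n_jobs)
benchmark.save_results("%s/benchmark_all.csv" % basepath)  # hypothetical filename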
@@ -178,9 +152,9 @@ def charts():
 if __name__ == "__main__":
     if args["train"]:
         train()
-    if args["test-baseline"]:
-        test_baseline()
-    if args["test-ml"]:
-        test_ml()
-    if args["charts"]:
-        charts()
+    #if args["test-baseline"]:
+    #    test_baseline()
+    #if args["test-ml"]:
+    #    test_ml()
+    #if args["charts"]:
+    #    charts()
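
As committed, only the train branch is reachable; the other subcommands are disabled. A quick smoke test of the active path, mirroring the Makefile's own invocation (the challenge name is hypothetical):

python benchmark.py train knapsack/ChallengeA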