Small improvements to benchmark scripts

master
Alinson S. Xavier 5 years ago
parent 28e2ba7c01
commit 1a04482a20

@@ -10,7 +10,9 @@ CHALLENGES := \
 	knapsack/ChallengeA \
 	tsp/ChallengeA
 
-main: $(addsuffix /performance.png, $(CHALLENGES))
+test: $(addsuffix /performance.png, $(CHALLENGES))
+
+train: $(addsuffix /train_instances.bin, $(CHALLENGES))
 
 %/train_instances.bin:
 	python benchmark.py train $*
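
Note: with this change the Makefile exposes the two phases as separate targets. A possible invocation, assuming make is run from the directory containing this Makefile (commands shown for illustration only):

    make train    # builds <challenge>/train_instances.bin for each challenge in CHALLENGES
    make test     # builds <challenge>/performance.png for each challenge (previously the "main" target)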

@@ -3,16 +3,21 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
-"""Benchmark script
+"""MIPLearn Benchmark Scripts
 
 Usage:
-    benchmark.py train <challenge>
-    benchmark.py test-baseline <challenge>
-    benchmark.py test-ml <challenge>
+    benchmark.py train [options] <challenge>
+    benchmark.py test-baseline [options] <challenge>
+    benchmark.py test-ml [options] <challenge>
     benchmark.py charts <challenge>
 
 Options:
     -h --help                Show this screen
+    --jobs=<n>               Number of instances to solve simultaneously [default: 10]
+    --train-time-limit=<n>   Solver time limit during training in seconds [default: 3600]
+    --test-time-limit=<n>    Solver time limit during test in seconds [default: 900]
+    --solver-threads=<n>     Number of threads the solver is allowed to use [default: 4]
+    --solver=<s>             Internal MILP solver to use [default: gurobi]
 """
 import importlib
 import logging
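
Note: the train and test commands now accept [options], so the solver settings are configurable per run instead of being hard-coded. A minimal sketch of how docopt resolves these flags (not part of the commit; the argv values are illustrative only):

    from docopt import docopt

    usage = """Usage:
        benchmark.py train [options] <challenge>

    Options:
        --jobs=<n>             Number of instances to solve simultaneously [default: 10]
        --solver-threads=<n>   Number of threads the solver is allowed to use [default: 4]
    """

    # Explicit flags override the [default: ...] values declared above.
    args = docopt(usage, argv=["train", "--jobs=20", "knapsack/ChallengeA"])
    assert args["<challenge>"] == "knapsack/ChallengeA"
    assert int(args["--jobs"]) == 20
    assert int(args["--solver-threads"]) == 4  # falls back to the declared default
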
@@ -36,15 +41,16 @@ logging.getLogger("pyomo.core").setLevel(logging.ERROR)
 logging.getLogger("miplearn").setLevel(logging.INFO)
 logger = logging.getLogger("benchmark")
 
-n_jobs = 10
-train_time_limit = 3600
-test_time_limit = 900
-internal_solver = "gurobi"
-
 args = docopt(__doc__)
 basepath = args["<challenge>"]
 pathlib.Path(basepath).mkdir(parents=True, exist_ok=True)
+
+n_jobs = int(args["--jobs"])
+n_threads = int(args["--solver-threads"])
+train_time_limit = int(args["--train-time-limit"])
+test_time_limit = int(args["--test-time-limit"])
+internal_solver = args["--solver"]
 
 
 def save(obj, filename):
     logger.info("Writing %s..." % filename)
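
Note: the previously hard-coded settings are now read from the command line; because every new option carries a [default: ...] annotation, running the script with no flags reproduces the old behavior. A quick sketch (not part of the commit; argv is illustrative and __doc__ refers to the docstring shown above):

    args = docopt(__doc__, argv=["train", "tsp/ChallengeA"])
    assert int(args["--jobs"]) == 10
    assert int(args["--train-time-limit"]) == 3600
    assert int(args["--test-time-limit"]) == 900
    assert int(args["--solver-threads"]) == 4
    assert args["--solver"] == "gurobi"
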
@@ -68,6 +74,7 @@ def train():
     solver = LearningSolver(
         time_limit=train_time_limit,
         solver=internal_solver,
+        threads=n_threads,
     )
     solver.parallel_solve(train_instances, n_jobs=n_jobs)
     save(train_instances, "%s/train_instances.bin" % basepath)
@@ -80,6 +87,7 @@ def test_baseline():
         "baseline": LearningSolver(
             time_limit=test_time_limit,
             solver=internal_solver,
+            threads=n_threads,
         ),
     }
     benchmark = BenchmarkRunner(solvers)
@@ -95,10 +103,12 @@ def test_ml():
         "ml-exact": LearningSolver(
             time_limit=test_time_limit,
             solver=internal_solver,
+            threads=n_threads,
         ),
         "ml-heuristic": LearningSolver(
             time_limit=test_time_limit,
             solver=internal_solver,
+            threads=n_threads,
             mode="heuristic",
         ),
     }
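
Note: passing threads=n_threads to every LearningSolver keeps the baseline, ml-exact and ml-heuristic runs on an equal footing, and it bounds the CPU demand when instances are solved in parallel. A rough budget, assuming the defaults from the docstring (figures are illustrative, not measured):

    n_jobs = 10                               # --jobs: instances solved simultaneously
    n_threads = 4                             # --solver-threads: threads per solver
    peak_solver_threads = n_jobs * n_threads  # up to 40 solver threads at once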
