Update benchmark scripts

pull/1/head
Alinson S. Xavier 6 years ago
parent a2fbb9f8d8
commit bd1d2117c5

@ -12,7 +12,7 @@ main: $(addsuffix /performance.png, $(CHALLENGES))
# Pattern rules (post-commit version, reconstructed from the side-by-side diff):
# for each challenge directory %, generate training instances, then run the
# baseline benchmark which depends on them.
%/train_instances.bin:
	python benchmark.py train $*
# NOTE(review): the commit changed this prerequisite from %/training_data.bin
# to %/train_instances.bin so the baseline run waits for instance generation.
%/benchmark_baseline.csv: %/train_instances.bin
	python benchmark.py test-baseline $*
%/benchmark_ml.csv: %/benchmark_baseline.csv %/benchmark_ml.csv: %/benchmark_baseline.csv

@ -16,11 +16,7 @@ Options:
""" """
from docopt import docopt from docopt import docopt
import importlib, pathlib import importlib, pathlib
from miplearn import (LearningSolver, from miplearn import (LearningSolver, BenchmarkRunner)
BenchmarkRunner,
WarmStartComponent,
BranchPriorityComponent,
)
from numpy import median from numpy import median
import pyomo.environ as pe import pyomo.environ as pe
import pickle import pickle
@ -29,7 +25,7 @@ import logging
logging.getLogger('pyomo.core').setLevel(logging.ERROR) logging.getLogger('pyomo.core').setLevel(logging.ERROR)
n_jobs = 10 n_jobs = 10
time_limit = 300 time_limit = 900
internal_solver = "gurobi" internal_solver = "gurobi"
args = docopt(__doc__) args = docopt(__doc__)
@ -65,32 +61,33 @@ def train():
def test_baseline():
    """Solve the test instances with a plain (component-free) solver.

    Reconstructed post-commit version: builds a single "baseline"
    LearningSolver with no ML components, solves every test instance in
    parallel, and writes the results CSV next to the instance files.
    Relies on module-level ``load``, ``basepath``, ``time_limit`` and
    ``n_jobs`` defined earlier in this script.
    """
    solvers = {
        "baseline": LearningSolver(
            time_limit=time_limit,
            # Empty components dict: disable all ML components so this run
            # measures the raw MIP solver as the comparison baseline.
            components={},
        ),
    }
    # Instances are loaded after constructing the solvers (the commit moved
    # this line down) so a bad pickle does not waste solver setup.
    test_instances = load("%s/test_instances.bin" % basepath)
    benchmark = BenchmarkRunner(solvers)
    benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
    benchmark.save_results("%s/benchmark_baseline.csv" % basepath)
def test_ml():
    """Solve the test instances with the ML-enhanced solvers.

    Reconstructed post-commit version: the commit removed the explicit
    ``train_instances`` load, ``benchmark.load_state`` and
    ``benchmark.fit`` calls, so the solvers here run with their default
    components. Baseline results are loaded first so the output CSV
    contains both baseline and ML rows for comparison.
    Relies on module-level ``load``, ``basepath``, ``time_limit`` and
    ``n_jobs`` defined earlier in this script.
    """
    solvers = {
        "ml-exact": LearningSolver(
            time_limit=time_limit,
        ),
        # Heuristic mode trades optimality guarantees for speed.
        "ml-heuristic": LearningSolver(
            time_limit=time_limit,
            mode="heuristic",
        ),
    }
    test_instances = load("%s/test_instances.bin" % basepath)
    benchmark = BenchmarkRunner(solvers)
    # Merge in the baseline results so save_results emits a combined table.
    benchmark.load_results("%s/benchmark_baseline.csv" % basepath)
    benchmark.parallel_solve(test_instances, n_jobs=n_jobs)
    benchmark.save_results("%s/benchmark_ml.csv" % basepath)
@ -155,9 +152,9 @@ def charts():
# Command-line dispatch (post-commit version: the commit re-enabled the
# previously commented-out subcommands). ``args`` comes from docopt at
# module level; each subcommand key maps to its handler below. The ifs are
# deliberately independent (not elif) so docopt's mutually-exclusive usage
# pattern decides which one fires.
if __name__ == "__main__":
    if args["train"]:
        train()
    if args["test-baseline"]:
        test_baseline()
    if args["test-ml"]:
        test_ml()
    if args["charts"]:
        charts()
