diff --git a/docs/benchmark/index.html b/docs/benchmark/index.html
index 11ef9e8..2f672ad 100644
--- a/docs/benchmark/index.html
+++ b/docs/benchmark/index.html
@@ -151,7 +151,6 @@ test_instances = [...]
# Training phase...
training_solver = LearningSolver(...)
training_solver.parallel_solve(train_instances, n_jobs=10)
-training_solver.save_state("data.bin")

# Test phase...
test_solvers = {
@@ -161,13 +160,12 @@ test_solvers = {
    "Strategy C": LearningSolver(...),
}
benchmark = BenchmarkRunner(test_solvers)
-benchmark.load_state("data.bin")
-benchmark.fit()
+benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=2)
print(benchmark.raw_results())
-The method load_state loads the saved training data into each one of the provided solvers, while fit trains their respective ML models. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns:
+The method fit trains the ML models for each individual solver. The method parallel_solve solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, raw_results produces a table of results (Pandas DataFrame) with the following columns:
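Whatever the exact column set, the object returned by raw_results is a regular Pandas DataFrame, so it can be previewed and exported with standard Pandas calls. A minimal sketch, assuming the benchmark object from the example above; the file name is only an illustration:

# Collect the benchmark results as a Pandas DataFrame.
results = benchmark.raw_results()

# Preview the first few rows and list the available columns.
print(results.head())
print(results.columns)

# Export the table for later analysis outside the benchmark runner.
results.to_csv("benchmark_results.csv", index=False)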
When iteratively exploring new formulations, encodings and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not changed, there is no need to evaluate its performance again every time a small change is made to the remaining solvers. BenchmarkRunner provides the methods save_results and load_results, which can be used to avoid this repetition, as the next example shows:
# Benchmark baseline solvers and save results to a file.
benchmark = BenchmarkRunner(baseline_solvers)
-benchmark.load_state("training_data.bin")
benchmark.parallel_solve(test_instances)
benchmark.save_results("baseline_results.csv")
# Benchmark remaining solvers, loading baseline results from file.
benchmark = BenchmarkRunner(alternative_solvers)
-benchmark.load_state("training_data.bin")
benchmark.load_results("baseline_results.csv")
+benchmark.fit(training_instances)
benchmark.parallel_solve(test_instances)
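Assuming load_results merges the saved baseline rows with the rows collected in the new run (an assumption about the intended workflow, not something stated above), the combined table can then be retrieved and saved just like before:

# Combined table: loaded baseline results plus the newly collected runs
# (assumes load_results and parallel_solve feed the same results table).
combined = benchmark.raw_results()
print(combined)

# Persist the combined table; the file name is only an illustration.
benchmark.save_results("all_results.csv")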
diff --git a/docs/index.html b/docs/index.html
index 9919cbf..71106a7 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -268,6 +268,6 @@