BenchmarkRunner: Ensure outputs are discarded

master
Alinson S. Xavier 4 years ago
parent 9689306876
commit 1bb2b48b7d

@@ -36,7 +36,7 @@
 function load(self)
     if self.loaded === nothing
-        self.loaded = load_jump_instance(self.filename)
+        self.loaded = load_instance(self.filename)
         self.samples = self.loaded.py.samples
     end
 end

@@ -111,7 +111,7 @@ function _check_miplearn_version(file)
 end
-function load_jump_instance(filename::AbstractString)::JuMPInstance
+function load_instance(filename::AbstractString)::JuMPInstance
     @info "Reading: $filename"
     instance = nothing
     time = @elapsed begin
@@ -155,4 +155,4 @@ function load_jump_instance(filename::AbstractString)::JuMPInstance
 end
-export JuMPInstance, save, load_jump_instance
+export JuMPInstance, save, load_instance

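Editor's note: the rename is purely cosmetic; `load_jump_instance` becomes `load_instance` with the same `JuMPInstance` return type. A minimal sketch of how callers are affected, assuming `filename` is a placeholder path to a previously saved instance file:

    using MIPLearn

    filename = "instance.bin"          # placeholder path, not a real format hint
    loaded = load_instance(filename)   # formerly load_jump_instance(filename)
    model = loaded.model               # the underlying JuMP model
    samples = loaded.py.samples        # training samples collected by earlier solves
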
@@ -224,9 +224,10 @@ end
 function solve_lp(data::JuMPSolverData; tee::Bool=false)
     model, bin_vars = data.model, data.bin_vars
     for var in bin_vars
-        JuMP.unset_binary(var)
-        JuMP.set_upper_bound(var, 1.0)
-        JuMP.set_lower_bound(var, 0.0)
+        ~is_fixed(var) || continue
+        unset_binary(var)
+        set_upper_bound(var, 1.0)
+        set_lower_bound(var, 0.0)
     end
     wallclock_time = @elapsed begin
         log = _optimize_and_capture_output!(model, tee=tee)
@@ -236,10 +237,11 @@ function solve_lp(data::JuMPSolverData; tee::Bool=false)
         obj_value = nothing
     else
         _update_solution!(data)
-        obj_value = JuMP.objective_value(model)
+        obj_value = objective_value(model)
     end
     for var in bin_vars
-        JuMP.set_binary(var)
+        ~is_fixed(var) || continue
+        set_binary(var)
     end
     return miplearn.solvers.internal.LPSolveStats(
         lp_value=obj_value,

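Editor's note: both loops in solve_lp are now guarded by `~is_fixed(var) || continue`, so variables that a component has fixed are left untouched; JuMP raises an error if you try to set bounds on a fixed variable. A standalone sketch of the same relax-and-restore pattern in plain JuMP, using the equivalent `is_fixed(var) && continue` guard; the two-variable model below is a made-up example, not part of MIPLearn:

    using JuMP, Gurobi

    model = Model(Gurobi.Optimizer)
    @variable(model, x, Bin)
    @variable(model, y, Bin)
    fix(y, 1.0)                      # e.g. fixed earlier by a heuristic component
    @objective(model, Max, 2x + 3y)
    bin_vars = [x, y]

    # Relax binaries to continuous [0, 1], skipping fixed variables
    for var in bin_vars
        is_fixed(var) && continue    # same effect as ~is_fixed(var) || continue
        unset_binary(var)
        set_upper_bound(var, 1.0)
        set_lower_bound(var, 0.0)
    end
    optimize!(model)

    # Restore integrality afterwards, again skipping fixed variables
    for var in bin_vars
        is_fixed(var) && continue
        set_binary(var)
    end
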
@@ -35,8 +35,13 @@
 function solve!(
     solver::LearningSolver,
     instance::Instance;
     tee::Bool = false,
+    discard_output::Bool = false,
 )
-    return @python_call solver.py.solve(instance.py, tee=tee)
+    return @python_call solver.py.solve(
+        instance.py,
+        tee=tee,
+        discard_output=discard_output,
+    )
 end

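Editor's note: the new `discard_output` keyword is simply forwarded to the underlying Python solver; as the test added further down confirms, solving with `discard_output=true` leaves the instance file with no stored samples. A small usage sketch, reusing the `build_knapsack_file_instance` helper from the test suite:

    using MIPLearn, Gurobi

    instance = build_knapsack_file_instance()   # test-suite helper
    solver = LearningSolver(Gurobi.Optimizer)

    # Solve without writing training data back to the instance file
    stats = solve!(solver, instance, tee=false, discard_output=true)

    # The file on disk still has no samples afterwards
    loaded = load_instance(instance.filename)
    @assert length(loaded.py.samples) == 0
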
@@ -26,9 +26,11 @@ function parallel_solve!(
     n_trials::Int = 3,
 )::Nothing
     for (solver_name, solver) in runner.solvers
+        @info "Benchmarking: $solver_name"
         for i in 1:n_trials
             for instance in instances
-                stats = solve!(solver, instance)
+                stats = solve!(solver, instance, discard_output=true)
+                instance.py.free()
                 stats["Solver"] = solver_name
                 stats = Dict(k => isnothing(v) ? missing : v for (k, v) in stats)
                 if runner.results === nothing

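Editor's note: inside this loop, each trial produces a stats dictionary; the runner tags it with the solver name and the comprehension converts `nothing` values to `missing` so the row can be stored in a DataFrame. An illustration with made-up keys and values:

    # Made-up stats row, purely for illustration
    stats = Dict{String,Any}("Lower bound" => 100.0, "Upper bound" => nothing)
    stats["Solver"] = "baseline"

    # nothing -> missing, as in the comprehension above
    row = Dict(k => isnothing(v) ? missing : v for (k, v) in stats)
    # row["Upper bound"] === missing
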
@@ -18,7 +18,7 @@ using Gurobi
         solver = LearningSolver(Gurobi.Optimizer)
         solve!(solver, file_instance)
-        loaded = load_jump_instance(filename)
+        loaded = load_instance(filename)
         @test length(loaded.py.samples) == 1
     end
 end

@@ -20,7 +20,7 @@ using MIPLearn
         @test isfile(filename)
         # Read model from file
-        loaded = load_jump_instance(filename)
+        loaded = load_instance(filename)
         x = variable_by_name(loaded.model, "x")
         @test loaded.model.ext[:miplearn][:variable_features][x] == [1.0]
         @test loaded.model.ext[:miplearn][:variable_categories][x] == "cat1"

@@ -35,4 +35,12 @@ using Gurobi
         loaded = load_solver(filename)
         @test loaded.py.components == "Placeholder"
     end
+
+    @testset "Discard output" begin
+        instance = build_knapsack_file_instance()
+        solver = LearningSolver(Gurobi.Optimizer)
+        solve!(solver, instance, discard_output=true)
+        loaded = load_instance(instance.filename)
+        @test length(loaded.py.samples) == 0
+    end
 end

@@ -8,24 +8,39 @@ using Gurobi
 @testset "BenchmarkRunner" begin
-    # Configure benchmark suite
-    benchmark = BenchmarkRunner(
-        solvers=Dict(
-            "Baseline" => LearningSolver(Gurobi.Optimizer, components=[]),
-            "Proposed" => LearningSolver(Gurobi.Optimizer),
-        ),
-    )
-    # Solve instances in parallel
+    # Initialize instances and generate training data
     instances = [
         build_knapsack_file_instance(),
        build_knapsack_file_instance(),
     ]
-    parallel_solve!(benchmark, instances)
+    parallel_solve!(
+        LearningSolver(Gurobi.Optimizer),
+        instances,
+    )
+
+    # Fit and benchmark
+    benchmark = BenchmarkRunner(
+        solvers=Dict(
+            "baseline" => LearningSolver(
+                Gurobi.Optimizer,
+                components=[],
+            ),
+            "ml-exact" => LearningSolver(
+                Gurobi.Optimizer,
+            ),
+            "ml-heur" => LearningSolver(
+                Gurobi.Optimizer,
+                mode="heuristic",
+            ),
+        ),
+    )
+    fit!(benchmark, instances)
+    parallel_solve!(benchmark, instances, n_trials=1)
+
     # Write CSV
     csv_filename = tempname()
     write_csv!(benchmark, csv_filename)
     @test isfile(csv_filename)
     csv = DataFrame(CSV.File(csv_filename))
     @test size(csv)[1] == 6
 end

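Editor's note: the final assertion checks out arithmetically: three solvers ("baseline", "ml-exact", "ml-heur") times two instances times n_trials=1 gives six result rows in the CSV. To inspect the benchmark output outside the test, something along these lines should work; the "Solver" column is the one parallel_solve! adds to every row, while other column names are not shown in this diff:

    using CSV, DataFrames

    csv = DataFrame(CSV.File(csv_filename))
    @assert size(csv, 1) == 6          # 3 solvers × 2 instances × 1 trial

    # Group the rows by solver, just to illustrate the layout
    for group in groupby(csv, "Solver")
        println(first(group.Solver), ": ", nrow(group), " rows")
    end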