Mirror of https://github.com/ANL-CEEESA/MIPLearn.jl.git (synced 2025-12-06 00:18:51 -06:00)
BenchmarkRunner: Ensure outputs are discarded
@@ -36,7 +36,7 @@
     function load(self)
         if self.loaded === nothing
-            self.loaded = load_jump_instance(self.filename)
+            self.loaded = load_instance(self.filename)
             self.samples = self.loaded.py.samples
         end
     end
 
@@ -111,7 +111,7 @@ function _check_miplearn_version(file)
 end
 
 
-function load_jump_instance(filename::AbstractString)::JuMPInstance
+function load_instance(filename::AbstractString)::JuMPInstance
     @info "Reading: $filename"
     instance = nothing
     time = @elapsed begin
@@ -155,4 +155,4 @@ function load_jump_instance(filename::AbstractString)::JuMPInstance
 end
 
 
-export JuMPInstance, save, load_jump_instance
+export JuMPInstance, save, load_instance
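For orientation, a minimal sketch of the renamed loader in use (illustration only, not part of the commit): the file path below is hypothetical, and the file is assumed to have been written earlier with the exported save function, whose exact signature is not shown in this diff.

using MIPLearn

filename = "knapsack.bin"              # hypothetical path to a previously saved instance
instance = load_instance(filename)     # replaces the old load_jump_instance
@show length(instance.py.samples)      # training samples stored alongside the instance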
@@ -224,9 +224,10 @@ end
 function solve_lp(data::JuMPSolverData; tee::Bool=false)
     model, bin_vars = data.model, data.bin_vars
     for var in bin_vars
-        JuMP.unset_binary(var)
-        JuMP.set_upper_bound(var, 1.0)
-        JuMP.set_lower_bound(var, 0.0)
+        ~is_fixed(var) || continue
+        unset_binary(var)
+        set_upper_bound(var, 1.0)
+        set_lower_bound(var, 0.0)
     end
     wallclock_time = @elapsed begin
         log = _optimize_and_capture_output!(model, tee=tee)
@@ -236,10 +237,11 @@ function solve_lp(data::JuMPSolverData; tee::Bool=false)
         obj_value = nothing
     else
         _update_solution!(data)
-        obj_value = JuMP.objective_value(model)
+        obj_value = objective_value(model)
     end
     for var in bin_vars
-        JuMP.set_binary(var)
+        ~is_fixed(var) || continue
+        set_binary(var)
     end
     return miplearn.solvers.internal.LPSolveStats(
         lp_value=obj_value,
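The two hunks above change how solve_lp builds and undoes the LP relaxation: binaries that are currently fixed are now skipped, presumably because adjusting bounds on a fixed variable would either error or interfere with the fixing. A standalone JuMP sketch of the same relax/restore pattern (not the package's code; is_fixed(var) && continue is equivalent to the committed ~is_fixed(var) || continue):

using JuMP

model = Model()
@variable(model, x[1:3], Bin)
fix(x[3], 1.0, force=true)               # x[3] is fixed and must be left untouched

bin_vars = all_variables(model)

# Relax: turn the non-fixed binaries into continuous variables in [0, 1].
for var in bin_vars
    is_fixed(var) && continue
    unset_binary(var)
    set_upper_bound(var, 1.0)
    set_lower_bound(var, 0.0)
end

# ... the LP relaxation would be solved here ...

# Restore: make the relaxed variables binary again, skipping fixed ones.
for var in bin_vars
    is_fixed(var) && continue
    set_binary(var)
end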
@@ -35,8 +35,13 @@ function solve!(
     solver::LearningSolver,
     instance::Instance;
     tee::Bool = false,
+    discard_output::Bool = false,
 )
-    return @python_call solver.py.solve(instance.py, tee=tee)
+    return @python_call solver.py.solve(
+        instance.py,
+        tee=tee,
+        discard_output=discard_output,
+    )
 end
 
 
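A usage sketch of the new keyword, mirroring the "Discard output" test added further down; build_knapsack_file_instance is a helper from this repository's test suite, and Gurobi is only one possible optimizer:

using MIPLearn, Gurobi

instance = build_knapsack_file_instance()        # test helper; any file-backed instance works
solver = LearningSolver(Gurobi.Optimizer)
solve!(solver, instance, discard_output=true)    # solve without persisting training data

# With discard_output=true, no samples should be written back to the instance file.
loaded = load_instance(instance.filename)
@assert length(loaded.py.samples) == 0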
@@ -26,9 +26,11 @@ function parallel_solve!(
     n_trials::Int = 3,
 )::Nothing
     for (solver_name, solver) in runner.solvers
+        @info "Benchmarking: $solver_name"
         for i in 1:n_trials
             for instance in instances
-                stats = solve!(solver, instance)
+                stats = solve!(solver, instance, discard_output=true)
+                instance.py.free()
                 stats["Solver"] = solver_name
                 stats = Dict(k => isnothing(v) ? missing : v for (k, v) in stats)
                 if runner.results === nothing
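One context line in this hunk is worth a note: the Dict comprehension converts nothing values (typically coming from Python's None in the stats dictionary) to missing, which DataFrames.jl and CSV.jl handle cleanly when the results are written out. A small self-contained illustration, using a made-up stats key:

stats = Dict("Solver" => "baseline", "LP value" => nothing)   # "LP value" is a hypothetical key
stats = Dict(k => isnothing(v) ? missing : v for (k, v) in stats)
@assert ismissing(stats["LP value"])
@assert stats["Solver"] == "baseline"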
@@ -18,7 +18,7 @@ using Gurobi
         solver = LearningSolver(Gurobi.Optimizer)
         solve!(solver, file_instance)
 
-        loaded = load_jump_instance(filename)
+        loaded = load_instance(filename)
         @test length(loaded.py.samples) == 1
     end
 end
@@ -20,7 +20,7 @@ using MIPLearn
     @test isfile(filename)
 
     # Read model from file
-    loaded = load_jump_instance(filename)
+    loaded = load_instance(filename)
     x = variable_by_name(loaded.model, "x")
     @test loaded.model.ext[:miplearn][:variable_features][x] == [1.0]
     @test loaded.model.ext[:miplearn][:variable_categories][x] == "cat1"
@@ -35,4 +35,12 @@ using Gurobi
         loaded = load_solver(filename)
         @test loaded.py.components == "Placeholder"
     end
+
+    @testset "Discard output" begin
+        instance = build_knapsack_file_instance()
+        solver = LearningSolver(Gurobi.Optimizer)
+        solve!(solver, instance, discard_output=true)
+        loaded = load_instance(instance.filename)
+        @test length(loaded.py.samples) == 0
+    end
 end
@@ -8,24 +8,39 @@ using Gurobi
 
 
 @testset "BenchmarkRunner" begin
-    # Configure benchmark suite
-    benchmark = BenchmarkRunner(
-        solvers=Dict(
-            "Baseline" => LearningSolver(Gurobi.Optimizer, components=[]),
-            "Proposed" => LearningSolver(Gurobi.Optimizer),
-        ),
-    )
-
-    # Solve instances in parallel
+    # Initialie instances and generate training data
     instances = [
         build_knapsack_file_instance(),
         build_knapsack_file_instance(),
     ]
-    parallel_solve!(benchmark, instances)
+    parallel_solve!(
+        LearningSolver(Gurobi.Optimizer),
+        instances,
+    )
+
+    # Fit and benchmark
+    benchmark = BenchmarkRunner(
+        solvers=Dict(
+            "baseline" => LearningSolver(
+                Gurobi.Optimizer,
+                components=[],
+            ),
+            "ml-exact" => LearningSolver(
+                Gurobi.Optimizer,
+            ),
+            "ml-heur" => LearningSolver(
+                Gurobi.Optimizer,
+                mode="heuristic",
+            ),
+        ),
+    )
+    fit!(benchmark, instances)
+    parallel_solve!(benchmark, instances, n_trials=1)
 
     # Write CSV
     csv_filename = tempname()
     write_csv!(benchmark, csv_filename)
     @test isfile(csv_filename)
     csv = DataFrame(CSV.File(csv_filename))
+    @test size(csv)[1] == 6
 end
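The new assertion @test size(csv)[1] == 6 checks the number of rows written to the CSV; assuming (as the parallel_solve! loop above suggests) one stats row per solver/instance/trial combination, the expected count works out as follows:

n_solvers   = 3    # "baseline", "ml-exact", "ml-heur"
n_instances = 2    # two build_knapsack_file_instance() calls
n_trials    = 1    # keyword passed to parallel_solve!
@assert n_solvers * n_instances * n_trials == 6   # matches @test size(csv)[1] == 6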