diff --git a/src/instance/file.jl b/src/instance/file.jl index a475656..32a3fd3 100644 --- a/src/instance/file.jl +++ b/src/instance/file.jl @@ -36,7 +36,7 @@ function load(self) if self.loaded === nothing - self.loaded = load_jump_instance(self.filename) + self.loaded = load_instance(self.filename) self.samples = self.loaded.py.samples end end diff --git a/src/instance/jump.jl b/src/instance/jump.jl index 27ae880..e80cd87 100644 --- a/src/instance/jump.jl +++ b/src/instance/jump.jl @@ -111,7 +111,7 @@ function _check_miplearn_version(file) end -function load_jump_instance(filename::AbstractString)::JuMPInstance +function load_instance(filename::AbstractString)::JuMPInstance @info "Reading: $filename" instance = nothing time = @elapsed begin @@ -155,4 +155,4 @@ function load_jump_instance(filename::AbstractString)::JuMPInstance end -export JuMPInstance, save, load_jump_instance +export JuMPInstance, save, load_instance diff --git a/src/solvers/jump.jl b/src/solvers/jump.jl index 9dc8fef..435d5f9 100644 --- a/src/solvers/jump.jl +++ b/src/solvers/jump.jl @@ -224,9 +224,10 @@ end function solve_lp(data::JuMPSolverData; tee::Bool=false) model, bin_vars = data.model, data.bin_vars for var in bin_vars - JuMP.unset_binary(var) - JuMP.set_upper_bound(var, 1.0) - JuMP.set_lower_bound(var, 0.0) + ~is_fixed(var) || continue + unset_binary(var) + set_upper_bound(var, 1.0) + set_lower_bound(var, 0.0) end wallclock_time = @elapsed begin log = _optimize_and_capture_output!(model, tee=tee) @@ -236,10 +237,11 @@ function solve_lp(data::JuMPSolverData; tee::Bool=false) obj_value = nothing else _update_solution!(data) - obj_value = JuMP.objective_value(model) + obj_value = objective_value(model) end for var in bin_vars - JuMP.set_binary(var) + ~is_fixed(var) || continue + set_binary(var) end return miplearn.solvers.internal.LPSolveStats( lp_value=obj_value, diff --git a/src/solvers/learning.jl b/src/solvers/learning.jl index 66609a8..f3cb1f7 100644 --- a/src/solvers/learning.jl +++ 
b/src/solvers/learning.jl @@ -35,8 +35,13 @@ function solve!( solver::LearningSolver, instance::Instance; tee::Bool = false, + discard_output::Bool = false, ) - return @python_call solver.py.solve(instance.py, tee=tee) + return @python_call solver.py.solve( + instance.py, + tee=tee, + discard_output=discard_output, + ) end diff --git a/src/utils/benchmark.jl b/src/utils/benchmark.jl index 02f26ac..ce98cce 100644 --- a/src/utils/benchmark.jl +++ b/src/utils/benchmark.jl @@ -26,9 +26,11 @@ function parallel_solve!( n_trials::Int = 3, )::Nothing for (solver_name, solver) in runner.solvers + @info "Benchmarking: $solver_name" for i in 1:n_trials for instance in instances - stats = solve!(solver, instance) + stats = solve!(solver, instance, discard_output=true) + instance.py.free() stats["Solver"] = solver_name stats = Dict(k => isnothing(v) ? missing : v for (k, v) in stats) if runner.results === nothing diff --git a/test/instance/file_test.jl b/test/instance/file_test.jl index 3279bb2..7bf9e36 100644 --- a/test/instance/file_test.jl +++ b/test/instance/file_test.jl @@ -18,7 +18,7 @@ using Gurobi solver = LearningSolver(Gurobi.Optimizer) solve!(solver, file_instance) - loaded = load_jump_instance(filename) + loaded = load_instance(filename) @test length(loaded.py.samples) == 1 end end diff --git a/test/instance/jump_test.jl b/test/instance/jump_test.jl index 605b2a8..e7f1f88 100644 --- a/test/instance/jump_test.jl +++ b/test/instance/jump_test.jl @@ -20,7 +20,7 @@ using MIPLearn @test isfile(filename) # Read model from file - loaded = load_jump_instance(filename) + loaded = load_instance(filename) x = variable_by_name(loaded.model, "x") @test loaded.model.ext[:miplearn][:variable_features][x] == [1.0] @test loaded.model.ext[:miplearn][:variable_categories][x] == "cat1" diff --git a/test/solvers/learning_test.jl b/test/solvers/learning_test.jl index 675d079..f4d1d4d 100644 --- a/test/solvers/learning_test.jl +++ b/test/solvers/learning_test.jl @@ -35,4 +35,12 @@ using 
Gurobi loaded = load_solver(filename) @test loaded.py.components == "Placeholder" end + + @testset "Discard output" begin + instance = build_knapsack_file_instance() + solver = LearningSolver(Gurobi.Optimizer) + solve!(solver, instance, discard_output=true) + loaded = load_instance(instance.filename) + @test length(loaded.py.samples) == 0 + end end diff --git a/test/utils/benchmark_test.jl b/test/utils/benchmark_test.jl index 52cda27..9cd3611 100644 --- a/test/utils/benchmark_test.jl +++ b/test/utils/benchmark_test.jl @@ -8,24 +8,39 @@ using Gurobi @testset "BenchmarkRunner" begin - # Configure benchmark suite - benchmark = BenchmarkRunner( - solvers=Dict( - "Baseline" => LearningSolver(Gurobi.Optimizer, components=[]), - "Proposed" => LearningSolver(Gurobi.Optimizer), - ), - ) - - # Solve instances in parallel + # Initialize instances and generate training data instances = [ build_knapsack_file_instance(), build_knapsack_file_instance(), ] - parallel_solve!(benchmark, instances) + parallel_solve!( + LearningSolver(Gurobi.Optimizer), + instances, + ) + + # Fit and benchmark + benchmark = BenchmarkRunner( + solvers=Dict( + "baseline" => LearningSolver( + Gurobi.Optimizer, + components=[], + ), + "ml-exact" => LearningSolver( + Gurobi.Optimizer, + ), + "ml-heur" => LearningSolver( + Gurobi.Optimizer, + mode="heuristic", + ), + ), + ) + fit!(benchmark, instances) + parallel_solve!(benchmark, instances, n_trials=1) # Write CSV csv_filename = tempname() write_csv!(benchmark, csv_filename) @test isfile(csv_filename) csv = DataFrame(CSV.File(csv_filename)) + @test size(csv)[1] == 6 end