40 Commits

Author SHA1 Message Date
aa11db99a2 FisSal2011: Adjust constants 2025-10-06 12:52:58 -05:00
a4ff65275e SplitFreeVars: Preserve var order 2025-10-06 12:51:27 -05:00
295e29c351 DualGMI: multiple fixes 2025-10-02 10:10:42 -05:00
67e706d727 FisSal2011: Write H5 2025-08-13 15:33:48 -05:00
407312e129 FisSal2011: Keep only active cuts at the end 2025-08-13 14:44:41 -05:00
e2e69415c1 FisSal2011: Implement faster get/set basis for Gurobi 2025-08-08 22:44:43 -05:00
9713873a34 FisSal2011: Large LP: Add cuts in small batches 2025-08-08 22:08:50 -05:00
e2906a0a7e FisSal2011: Accelerate creation of obj function 2025-08-08 21:49:41 -05:00
3ca5a4fec7 FisSal2011: Small fix 2025-08-08 21:32:11 -05:00
84acd6b72c collect_gmi_FisSal2011: Accelerate appending unique cuts 2025-08-08 21:11:05 -05:00
8f3eb8adc4 FisSal2011: Implement miplearn variant; minor fixes 2025-08-08 20:25:51 -05:00
65a6024c36 assert_cuts_off: Improve performance 2025-08-08 15:46:24 -05:00
bb59362571 compute_tableau: Compute directly in compressed row format 2025-08-08 15:30:01 -05:00
5e2b0c2958 FisSal2011: Improve estimated tableau density 2025-08-08 15:06:15 -05:00
37f3abee42 FisSal2011: Speed up hash calculation 2025-08-08 14:50:07 -05:00
1296182744 compute_tableau: Improve efficiency 2025-08-08 13:49:26 -05:00
4158fccf12 compute_tableau: Reduce memory requirements 2025-08-07 22:09:15 -05:00
97c5813e59 FisSal2011: Change some default args; remove basis_seen 2025-08-07 21:47:30 -05:00
55b0a2bbca AddSlackVariables: Improve performance 2025-08-07 21:42:24 -05:00
b8d836de10 FisSal2011: Implement early termination; improve log 2025-08-04 23:29:59 -05:00
1c44cb4e86 Fix incorrect integer slacks 2025-08-04 23:29:00 -05:00
8edd031bbe FisSal2011: Add multiple variants 2025-08-04 21:00:36 -05:00
0a0d133161 FisSal2011: clean up, improve gap closure on MIPLIB 3 (65.5%) 2025-08-04 16:15:38 -05:00
0b5ec4740e FisSal2011: partial implementation 2025-08-04 15:17:17 -05:00
05e7d1619c Make dual GMI cuts stronger 2025-08-01 09:43:09 -05:00
d351d84d58 DualGMI: Skip empty H5 files 2025-07-28 12:54:42 -05:00
1aaf4ebdc4 DualGmi: Revert early stop for invalid cuts 2025-07-22 13:43:35 -05:00
5662e5c2e6 DualGMI: Add time limit 2025-07-22 12:06:37 -05:00
63bbd750fb DualGMI: compression: Skip empty files 2025-07-17 17:07:20 -05:00
6c903d0b19 DualGMI: Fix type errors 2025-07-17 13:02:45 -05:00
c3a8fa6a08 DualGMI: Use compressed basis representation 2025-07-17 12:22:11 -05:00
5c522dbc5f DualGMI: Reimplement Expert using kNN component 2025-07-17 11:04:41 -05:00
a9f1b2c394 JumpSolver: skip obj_coeffs_quad unless problem has quad terms 2025-07-17 10:45:58 -05:00
2ea0043c03 Add support for MIQPs; implement max cut model 2025-06-11 15:38:22 -05:00
9ac2f74856 BB/log: Increase node & parent column width 2025-04-18 16:05:01 -05:00
672bb220c1 Disable precompilation 2024-12-10 15:12:00 -06:00
20a7cfb42d BB: Make compatible with MOI 1.32+ 2024-12-10 15:09:00 -06:00
b6ba75c3dc Add compat section: PrecompileTools, SCIP 2024-12-10 12:20:25 -06:00
a5a3690bb6 Bump to MIPLearn 0.4.2 2024-12-10 11:47:26 -06:00
e5a2550c21 Bump to MIPLearn 0.4.1 2024-12-10 11:10:29 -06:00
33 changed files with 1517 additions and 889 deletions


@@ -1,11 +1,12 @@
 name = "MIPLearn"
 uuid = "2b1277c3-b477-4c49-a15e-7ba350325c68"
 authors = ["Alinson S Xavier <git@axavier.org>"]
-version = "0.4.0"
+version = "0.4.2"
 [deps]
 Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d"
 DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
+Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b"
 HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
 HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
 JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
@@ -28,6 +29,7 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 [compat]
 Conda = "1"
 DataStructures = "0.18"
+Gurobi = "1.7.5"
 HDF5 = "0.16"
 HiGHS = "1"
 JLD2 = "0.4"
@@ -36,8 +38,10 @@ JuMP = "1"
 KLU = "0.4"
 MathOptInterface = "1"
 OrderedCollections = "1"
+PrecompileTools = "1"
 PyCall = "1"
 Requires = "1"
+SCIP = "0.12"
 Statistics = "1"
 TimerOutputs = "0.5"
 julia = "1"

deps/build.jl vendored

@@ -5,7 +5,7 @@ function install_miplearn()
     Conda.update()
     pip = joinpath(dirname(pyimport("sys").executable), "pip")
     isfile(pip) || error("$pip: invalid path")
-    run(`$pip install miplearn==0.4.0`)
+    run(`$pip install miplearn==0.4.4`)
 end
 install_miplearn()


@@ -6,7 +6,7 @@ using Printf
 function print_progress_header()
     @printf(
-        "%8s %9s %9s %13s %13s %9s %6s %13s %6s %-24s %9s %9s %6s %6s",
+        "%8s %9s %9s %13s %13s %9s %9s %13s %9s %-24s %9s %9s %6s %6s",
         "time",
         "processed",
         "pending",
@@ -31,9 +31,9 @@ function print_progress(
     node::Node;
     time_elapsed::Float64,
     print_interval::Int,
-    primal_update::Bool
+    primal_update::Bool,
 )::Nothing
-    if (pool.processed % print_interval == 0) || isempty(pool.pending)
+    if (pool.processed % print_interval == 0) || isempty(pool.pending) || primal_update
         if isempty(node.branch_vars)
             branch_var_name = "---"
             branch_lb = "---"
@@ -46,7 +46,7 @@ function print_progress(
             branch_ub = @sprintf("%9.2f", last(node.branch_ub))
         end
         @printf(
-            "%8.2f %9d %9d %13.6e %13.6e %9.2e %6d %13.6e %6s %-24s %9s %9s %6d %6d",
+            "%8.2f %9d %9d %13.6e %13.6e %9.2e %9d %13.6e %9s %-24s %9s %9s %6d %6d",
             time_elapsed,
             pool.processed,
             length(pool.processing) + length(pool.pending),


@@ -134,7 +134,11 @@ function _get_int_variables(
             var_ub = constr.upper
             MOI.delete(optimizer, _upper_bound_index(var))
         end
-        MOI.add_constraint(optimizer, var, MOI.Interval(var_lb, var_ub))
+        MOI.add_constraint(
+            optimizer,
+            MOI.VariableIndex(var.index),
+            MOI.Interval(var_lb, var_ub),
+        )
     end
     push!(vars, var)
     push!(lb, var_lb)


@@ -8,10 +8,10 @@ import Base.Threads: threadid
 function take(
     pool::NodePool;
-    suggestions::Array{Node}=[],
+    suggestions::Array{Node} = [],
     time_remaining::Float64,
     gap_limit::Float64,
-    node_limit::Int
+    node_limit::Int,
 )::Union{Symbol,Node}
     t = threadid()
     lock(pool.lock) do
@@ -53,8 +53,8 @@ function offer(
     pool::NodePool;
     parent_node::Union{Nothing,Node},
     child_nodes::Vector{Node},
-    time_elapsed::Float64=0.0,
-    print_interval::Int=100
+    time_elapsed::Float64 = 0.0,
+    print_interval::Int = 100,
 )::Nothing
     lock(pool.lock) do
         primal_update = false
@@ -101,16 +101,15 @@ function offer(
             # Update branching variable history
             branch_var = child_nodes[1].branch_vars[end]
             offset = findfirst(isequal(branch_var), parent_node.fractional_variables)
-            if offset !== nothing
             x = parent_node.fractional_values[offset]
             obj_change_up = child_nodes[1].obj - parent_node.obj
             obj_change_down = child_nodes[2].obj - parent_node.obj
             _update_var_history(
-                pool=pool,
-                var=branch_var,
-                x=x,
-                obj_change_down=obj_change_down,
-                obj_change_up=obj_change_up,
+                pool = pool,
+                var = branch_var,
+                x = x,
+                obj_change_down = obj_change_down,
+                obj_change_up = obj_change_up,
             )
             # Update global history
             pool.history.avg_pseudocost_up =
@@ -118,15 +117,14 @@ function offer(
             pool.history.avg_pseudocost_down =
                 mean(vh.pseudocost_down for vh in values(pool.var_history))
         end
-        end
         for node in child_nodes
             print_progress(
                 pool,
                 node,
-                time_elapsed=time_elapsed,
-                print_interval=print_interval,
-                primal_update=isfinite(node.obj) && isempty(node.fractional_variables),
+                time_elapsed = time_elapsed,
+                print_interval = print_interval,
+                primal_update = isfinite(node.obj) && isempty(node.fractional_variables),
             )
         end
     end
@@ -138,7 +136,7 @@ function _update_var_history(;
     var::Variable,
     x::Float64,
     obj_change_down::Float64,
-    obj_change_up::Float64
+    obj_change_up::Float64,
 )::Nothing
     # Create new history entry
     if var ∉ keys(pool.var_history)


@@ -10,22 +10,16 @@ import ..H5File
function solve!( function solve!(
mip::MIP; mip::MIP;
time_limit::Float64=Inf, time_limit::Float64 = Inf,
node_limit::Int=typemax(Int), node_limit::Int = typemax(Int),
gap_limit::Float64=1e-4, gap_limit::Float64 = 1e-4,
print_interval::Int=5, print_interval::Int = 5,
initial_primal_bound::Float64=Inf, initial_primal_bound::Float64 = Inf,
branch_rule::VariableBranchingRule=ReliabilityBranching(), branch_rule::VariableBranchingRule = ReliabilityBranching(),
enable_plunging=true, enable_plunging = true,
replay=nothing )::NodePool
)::Tuple{NodePool,ReplayInfo}
if replay === nothing
replay = ReplayInfo()
end
time_initial = time() time_initial = time()
pool = NodePool(mip=mip, next_index=replay.next_index) pool = NodePool(mip = mip)
pool.primal_bound = initial_primal_bound pool.primal_bound = initial_primal_bound
root_node = _create_node(mip) root_node = _create_node(mip)
@@ -40,9 +34,9 @@ function solve!(
offer( offer(
pool, pool,
parent_node=nothing, parent_node = nothing,
child_nodes=[root_node], child_nodes = [root_node],
print_interval=print_interval, print_interval = print_interval,
) )
@threads for t = 1:nthreads() @threads for t = 1:nthreads()
child_one, child_zero, suggestions = nothing, nothing, Node[] child_one, child_zero, suggestions = nothing, nothing, Node[]
@@ -53,10 +47,10 @@ function solve!(
end end
node = take( node = take(
pool, pool,
suggestions=suggestions, suggestions = suggestions,
time_remaining=time_limit - time_elapsed, time_remaining = time_limit - time_elapsed,
node_limit=node_limit, node_limit = node_limit,
gap_limit=gap_limit, gap_limit = gap_limit,
) )
if node == :END if node == :END
break break
@@ -70,25 +64,10 @@ function solve!(
@assert status == :Optimal @assert status == :Optimal
_unset_node_bounds(node) _unset_node_bounds(node)
if node.index in keys(replay.node_decisions)
decision = replay.node_decisions[node.index]
ids = decision.ids
branch_var = decision.branch_var
var_value = decision.var_value
else
# Find branching variable # Find branching variable
ids = generate_indices(pool, 2) ids = generate_indices(pool, 2)
branch_var = find_branching_var(branch_rule, node, pool) branch_var = find_branching_var(branch_rule, node, pool)
# Query current fractional value
offset = findfirst(isequal(branch_var), node.fractional_variables)
var_value = node.fractional_values[offset]
# Update replay
decision = ReplayNodeDecision(; branch_var, var_value, ids)
replay.node_decisions[node.index] = decision
end
# Find current variable lower and upper bounds # Find current variable lower and upper bounds
offset = findfirst(isequal(branch_var), mip.int_vars) offset = findfirst(isequal(branch_var), mip.int_vars)
var_lb = mip.int_vars_lb[offset] var_lb = mip.int_vars_lb[offset]
@@ -100,43 +79,46 @@ function solve!(
end end
end end
# Query current fractional value
offset = findfirst(isequal(branch_var), node.fractional_variables)
var_value = node.fractional_values[offset]
child_zero = _create_node( child_zero = _create_node(
mip, mip,
index=ids[2], index = ids[2],
parent=node, parent = node,
branch_var=branch_var, branch_var = branch_var,
branch_var_lb=var_lb, branch_var_lb = var_lb,
branch_var_ub=floor(var_value), branch_var_ub = floor(var_value),
) )
child_one = _create_node( child_one = _create_node(
mip, mip,
index=ids[1], index = ids[1],
parent=node, parent = node,
branch_var=branch_var, branch_var = branch_var,
branch_var_lb=ceil(var_value), branch_var_lb = ceil(var_value),
branch_var_ub=var_ub, branch_var_ub = var_ub,
) )
offer( offer(
pool, pool,
parent_node=node, parent_node = node,
child_nodes=[child_one, child_zero], child_nodes = [child_one, child_zero],
time_elapsed=time_elapsed, time_elapsed = time_elapsed,
print_interval=print_interval, print_interval = print_interval,
) )
end end
end end
end end
replay.next_index = pool.next_index return pool
return pool, replay
end end
function _create_node( function _create_node(
mip; mip;
index::Int=0, index::Int = 0,
parent::Union{Nothing,Node}=nothing, parent::Union{Nothing,Node} = nothing,
branch_var::Union{Nothing,Variable}=nothing, branch_var::Union{Nothing,Variable} = nothing,
branch_var_lb::Union{Nothing,Float64}=nothing, branch_var_lb::Union{Nothing,Float64} = nothing,
branch_var_ub::Union{Nothing,Float64}=nothing branch_var_ub::Union{Nothing,Float64} = nothing,
)::Node )::Node
if parent === nothing if parent === nothing
branch_vars = Variable[] branch_vars = Variable[]


@@ -72,14 +72,3 @@ Base.@kwdef mutable struct NodePool
     history::History = History()
     var_history::Dict{Variable,VariableHistory} = Dict()
 end
-Base.@kwdef struct ReplayNodeDecision
-    branch_var
-    var_value
-    ids
-end
-Base.@kwdef mutable struct ReplayInfo
-    node_decisions::Dict{Int,ReplayNodeDecision} = Dict()
-    next_index::Int = 1
-end


@@ -185,7 +185,7 @@ function collect_gmi(
     )
 end
-function select_gmi_rows(data, basis, x; max_rows = 10, atol = 1e-4)
+function select_gmi_rows(data, basis, x; max_rows = 10, atol = 0.001)
     candidate_rows = [
         r for r = 1:length(basis.var_basic) if (
             (data.var_types[basis.var_basic[r]] != 'C') &&
@@ -199,23 +199,82 @@ function select_gmi_rows(data, basis, x; max_rows = 10, atol = 1e-4)
return [candidate_rows[perm[i]] for i = 1:min(length(perm), max_rows)] return [candidate_rows[perm[i]] for i = 1:min(length(perm), max_rows)]
end end
# function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet
# @timeit "Initialization" begin
# nrows, ncols = size(tableau.lhs)
# ub = Float64[Inf for _ = 1:nrows]
# lb = Float64[0.999 for _ = 1:nrows]
# tableau_I, tableau_J, tableau_V = findnz(tableau.lhs)
# lhs_I = Int[]
# lhs_J = Int[]
# lhs_V = Float64[]
# end
# @timeit "Compute coefficients" begin
# for k = 1:nnz(tableau.lhs)
# i::Int = tableau_I[k]
# j::Int = tableau_J[k]
# v::Float64 = 0.0
# frac_alpha_j = frac(tableau_V[k])
# alpha_j = tableau_V[k]
# beta = frac(tableau.rhs[i])
# if data.var_types[j] == 'C'
# if alpha_j >= 0
# v = alpha_j / beta
# else
# v = -alpha_j / (1 - beta)
# end
# else
# if frac_alpha_j < beta
# v = frac_alpha_j / beta
# else
# v = (1 - frac_alpha_j) / (1 - beta)
# end
# end
# if abs(v) > 1e-8
# push!(lhs_I, i)
# push!(lhs_J, tableau_J[k])
# push!(lhs_V, v)
# end
# end
# end
# @timeit "Convert to ConstraintSet" begin
# lhs = sparse(lhs_I, lhs_J, lhs_V, nrows, ncols)
# cs = ConstraintSet(; lhs, ub, lb)
# end
# return cs
# end
function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet
nrows, ncols = size(tableau.lhs) @timeit "Initialization" begin
ub = Float64[Inf for _ = 1:nrows] nrows::Int, ncols::Int = size(tableau.lhs)
lb = Float64[0.9999 for _ = 1:nrows] var_types::Vector{Char} = data.var_types
tableau_I, tableau_J, tableau_V = findnz(tableau.lhs) tableau_rhs::Vector{Float64} = tableau.rhs
lhs_I = Int[] tableau_I::Vector{Int}, tableau_J::Vector{Int}, tableau_V::Vector{Float64} = findnz(tableau.lhs)
lhs_J = Int[] end
lhs_V = Float64[]
@timeit "Pre-allocation" begin
cut_ub::Vector{Float64} = fill(Inf, nrows)
cut_lb::Vector{Float64} = fill(0.999, nrows)
nnz_tableau::Int = length(tableau_I)
cut_lhs_I::Vector{Int} = Vector{Int}(undef, nnz_tableau)
cut_lhs_J::Vector{Int} = Vector{Int}(undef, nnz_tableau)
cut_lhs_V::Vector{Float64} = Vector{Float64}(undef, nnz_tableau)
cut_hash::Vector{UInt64} = zeros(UInt64, nrows)
nnz_count::Int = 0
end
@timeit "Compute coefficients" begin @timeit "Compute coefficients" begin
for k = 1:nnz(tableau.lhs) @inbounds for k = 1:nnz_tableau
i::Int = tableau_I[k] i::Int = tableau_I[k]
j::Int = tableau_J[k] j::Int = tableau_J[k]
v::Float64 = 0.0 alpha_j::Float64 = tableau_V[k]
frac_alpha_j = frac(tableau_V[k]) frac_alpha_j::Float64 = alpha_j - floor(alpha_j)
alpha_j = tableau_V[k] beta_i::Float64 = tableau_rhs[i]
beta = frac(tableau.rhs[i]) beta::Float64 = beta_i - floor(beta_i)
if data.var_types[j] == 'C' v::Float64 = 0
# Compute coefficient
if var_types[j] == 'C'
if alpha_j >= 0 if alpha_j >= 0
v = alpha_j / beta v = alpha_j / beta
else else
@@ -228,16 +287,34 @@ function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet
v = (1 - frac_alpha_j) / (1 - beta) v = (1 - frac_alpha_j) / (1 - beta)
end end
end end
# Store if significant
if abs(v) > 1e-8 if abs(v) > 1e-8
push!(lhs_I, i) nnz_count += 1
push!(lhs_J, tableau_J[k]) cut_lhs_I[nnz_count] = i
push!(lhs_V, v) cut_lhs_J[nnz_count] = j
cut_lhs_V[nnz_count] = v
cut_hash[i] = hash(j, cut_hash[i])
cut_hash[i] = hash(v, cut_hash[i])
end end
end end
lhs = sparse(lhs_I, lhs_J, lhs_V, nrows, ncols)
end end
return ConstraintSet(; lhs, ub, lb)
@timeit "Resize arrays to actual size" begin
resize!(cut_lhs_I, nnz_count)
resize!(cut_lhs_J, nnz_count)
resize!(cut_lhs_V, nnz_count)
end
# TODO: Build cut in compressed row format instead of converting
@timeit "Convert to ConstraintSet" begin
cut_lhs::SparseMatrixCSC = sparse(cut_lhs_I, cut_lhs_J, cut_lhs_V, nrows, ncols)
cs::ConstraintSet = ConstraintSet(; lhs=cut_lhs, ub=cut_ub, lb=cut_lb, hash=cut_hash)
end
return cs
end end
export compute_gmi, export compute_gmi,
frac, select_gmi_rows, assert_cuts_off, assert_does_not_cut_off, collect_gmi frac, select_gmi_rows, assert_cuts_off, assert_does_not_cut_off, collect_gmi
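The per-coefficient rule that compute_gmi applies above can be written as a small standalone function. This is an illustrative sketch only: the helper name gmi_coefficient is not part of the package, and frac mirrors the frac helper exported by this module.

frac(x) = x - floor(x)

# GMI coefficient for one tableau entry `alpha` in a row whose right-hand side has
# fractional part `beta`; `continuous` marks a continuous (non-integer) column.
function gmi_coefficient(alpha::Float64, beta::Float64, continuous::Bool)
    if continuous
        return alpha >= 0 ? alpha / beta : -alpha / (1 - beta)
    else
        f = frac(alpha)
        return f < beta ? f / beta : (1 - f) / (1 - beta)
    end
end

gmi_coefficient(2.3, 0.4, false)   # frac(2.3) ≈ 0.3 < 0.4, so the coefficient is 0.3 / 0.4 ≈ 0.75

In compute_gmi these coefficients are accumulated row by row, and each cut is stored as a >= 0.999 constraint, matching the cut_lb values in the diff above.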

File diff suppressed because it is too large.

@@ -27,25 +27,26 @@ function assert_eq(a, b; atol = 1e-4)
 end

 function assert_cuts_off(cuts::ConstraintSet, x::Vector{Float64}, tol = 1e-6)
+    vals = cuts.lhs * x
     for i = 1:length(cuts.lb)
-        val = cuts.lhs[i, :]' * x
-        if (val <= cuts.ub[i] - tol) && (val >= cuts.lb[i] + tol)
-            throw(ErrorException("inequality fails to cut off fractional solution"))
+        if (vals[i] <= cuts.ub[i] - tol) && (vals[i] >= cuts.lb[i] + tol)
+            throw(ErrorException("inequality $i fails to cut off fractional solution: $(cuts.lb[i]) <= $(vals[i]) <= $(cuts.ub[i])"))
         end
     end
 end

 function assert_does_not_cut_off(cuts::ConstraintSet, x::Vector{Float64}; tol = 1e-6)
+    vals = cuts.lhs * x
     for i = 1:length(cuts.lb)
-        val = cuts.lhs[i, :]' * x
-        ub = cuts.ub[i]
-        lb = cuts.lb[i]
-        if (val >= ub) || (val <= lb)
-            throw(
-                ErrorException(
-                    "inequality $i cuts off integer solution ($lb <= $val <= $ub)",
-                ),
-            )
+        if (vals[i] >= cuts.ub[i]) || (vals[i] <= cuts.lb[i])
+            throw(ErrorException("inequality $i cuts off integer solution: $(cuts.lb[i]) <= $(vals[i]) <= $(cuts.ub[i])"))
         end
     end
 end
+function assert_int(x::Float64, tol=1e-5)
+    fx = frac(x)
+    if min(fx, 1 - fx) >= tol
+        throw(ErrorException("Number must be integer: $x"))
+    end
+end


@@ -18,10 +18,10 @@ Base.@kwdef mutable struct ProblemData
 end

 Base.@kwdef mutable struct Tableau
-    obj::Any
-    lhs::Any
-    rhs::Any
-    z::Any
+    obj::Vector{Float64}
+    lhs::SparseMatrixCSC
+    rhs::Vector{Float64}
+    z::Float64
 end

 Base.@kwdef mutable struct Basis
@@ -35,6 +35,7 @@ Base.@kwdef mutable struct ConstraintSet
     lhs::SparseMatrixCSC
     ub::Vector{Float64}
     lb::Vector{Float64}
+    hash::Union{Nothing,Vector{UInt64}} = nothing
 end

 export ProblemData, Tableau, Basis, ConstraintSet


@@ -4,14 +4,24 @@
using KLU using KLU
using TimerOutputs using TimerOutputs
using Gurobi
function get_basis(model::JuMP.Model)::Basis function get_basis(model::JuMP.Model)::Basis
if isa(unsafe_backend(model), Gurobi.Optimizer)
return get_basis_gurobi(model)
end
@timeit "Initialization" begin
var_basic = Int[] var_basic = Int[]
var_nonbasic = Int[] var_nonbasic = Int[]
constr_basic = Int[] constr_basic = Int[]
constr_nonbasic = Int[] constr_nonbasic = Int[]
nvars = num_variables(model)
sizehint!(var_basic, nvars)
sizehint!(var_nonbasic, nvars)
end
# Variables @timeit "Query variables" begin
for (i, var) in enumerate(all_variables(model)) for (i, var) in enumerate(all_variables(model))
bstatus = MOI.get(model, MOI.VariableBasisStatus(), var) bstatus = MOI.get(model, MOI.VariableBasisStatus(), var)
if bstatus == MOI.BASIC if bstatus == MOI.BASIC
@@ -22,8 +32,9 @@ function get_basis(model::JuMP.Model)::Basis
error("Unknown basis status: $bstatus") error("Unknown basis status: $bstatus")
end end
end end
end
# Constraints @timeit "Query constraints" begin
constr_index = 1 constr_index = 1
for (ftype, stype) in list_of_constraint_types(model) for (ftype, stype) in list_of_constraint_types(model)
for constr in all_constraints(model, ftype, stype) for constr in all_constraints(model, ftype, stype)
@@ -44,8 +55,110 @@ function get_basis(model::JuMP.Model)::Basis
end end
end end
end end
end
return Basis(; var_basic, var_nonbasic, constr_basic, constr_nonbasic) @timeit "Build basis struct" begin
basis = Basis(; var_basic, var_nonbasic, constr_basic, constr_nonbasic)
end
return basis
end
function set_basis(model::JuMP.Model, basis::Basis)
if isa(unsafe_backend(model), Gurobi.Optimizer)
# NOP
return
end
@timeit "Initialization" begin
nvars = num_variables(model)
gurobi_model = unsafe_backend(model).inner
end
@timeit "Set variable basis" begin
var_basis_statuses = Vector{Cint}(undef, nvars)
fill!(var_basis_statuses, -1) # Default to GRB_NONBASIC_LOWER
for var_idx in basis.var_basic
var_basis_statuses[var_idx] = 0 # GRB_BASIC
end
ret = GRBsetintattrarray(gurobi_model, "VBasis", 0, nvars, var_basis_statuses)
if ret != 0
error("Failed to set variable basis statuses in Gurobi: error code $ret")
end
end
@timeit "Set constraint basis" begin
nconstr = num_constraints(model, AffExpr, MOI.EqualTo{Float64})
constr_basis_statuses = Vector{Cint}(undef, nconstr)
fill!(constr_basis_statuses, -1) # Default to GRB_NONBASIC
for constr_idx in basis.constr_basic
constr_basis_statuses[constr_idx] = 0 # GRB_BASIC
end
ret = GRBsetintattrarray(gurobi_model, "CBasis", 0, nconstr, constr_basis_statuses)
if ret != 0
error("Failed to set constraint basis statuses in Gurobi: error code $ret")
end
end
return nothing
end
function get_basis_gurobi(model::JuMP.Model)::Basis
@timeit "Initialization" begin
var_basic = Int[]
var_nonbasic = Int[]
constr_basic = Int[]
constr_nonbasic = Int[]
nvars = num_variables(model)
sizehint!(var_basic, nvars)
sizehint!(var_nonbasic, nvars)
gurobi_model = unsafe_backend(model).inner
end
@timeit "Query variables" begin
var_basis_statuses = Vector{Cint}(undef, nvars)
ret = GRBgetintattrarray(gurobi_model, "VBasis", 0, nvars, var_basis_statuses)
if ret != 0
error("Failed to get variable basis statuses from Gurobi: error code $ret")
end
for i in 1:nvars
if var_basis_statuses[i] == 0 # GRB_BASIC
push!(var_basic, i)
elseif var_basis_statuses[i] == -1 # GRB_NONBASIC_LOWER
push!(var_nonbasic, i)
else
error("Unknown variable basis status: $(var_basis_statuses[i])")
end
end
end
@timeit "Query constraints" begin
nconstr = num_constraints(model, AffExpr, MOI.EqualTo{Float64})
constr_basis_statuses = Vector{Cint}(undef, nconstr)
ret = GRBgetintattrarray(gurobi_model, "CBasis", 0, nconstr, constr_basis_statuses)
if ret != 0
error("Failed to get constraint basis statuses from Gurobi: error code $ret")
end
for i in 1:nconstr
if constr_basis_statuses[i] == 0 # GRB_BASIC
push!(constr_basic, i)
elseif constr_basis_statuses[i] == -1 # GRB_NONBASIC
push!(constr_nonbasic, i)
else
error("Unknown constraint basis status: $(constr_basis_statuses[i])")
end
end
end
@timeit "Build basis struct" begin
basis = Basis(; var_basic, var_nonbasic, constr_basic, constr_nonbasic)
end
return basis
end end
function get_x(model::JuMP.Model) function get_x(model::JuMP.Model)
@@ -58,7 +171,12 @@ function compute_tableau(
x::Union{Nothing,Vector{Float64}} = nothing, x::Union{Nothing,Vector{Float64}} = nothing,
rows::Union{Vector{Int},Nothing} = nothing, rows::Union{Vector{Int},Nothing} = nothing,
tol = 1e-8, tol = 1e-8,
estimated_density = 0.10,
)::Tableau )::Tableau
if isnan(estimated_density) || estimated_density <= 0
estimated_density = 0.10
end
@timeit "Split data" begin @timeit "Split data" begin
nrows, ncols = size(data.constr_lhs) nrows, ncols = size(data.constr_lhs)
lhs_slacks = sparse(I, nrows, nrows) lhs_slacks = sparse(I, nrows, nrows)
@@ -73,35 +191,71 @@ function compute_tableau(
factor = klu(sparse(lhs_b')) factor = klu(sparse(lhs_b'))
end end
@timeit "Compute tableau" begin @timeit "Initialize arrays" begin
@timeit "Initialize" begin num_rows = length(rows)
tableau_rhs = zeros(length(rows)) tableau_rhs::Array{Float64} = zeros(num_rows)
tableau_lhs = zeros(length(rows), ncols) tableau_rowptr::Array{Int} = zeros(Int, num_rows + 1)
end tableau_colval::Array{Int} = Int[]
for k in eachindex(1:length(rows)) tableau_nzval::Array{Float64} = Float64[]
@timeit "Prepare inputs" begin estimated_nnz::Int = round(num_rows * ncols * estimated_density)
i = rows[k] sizehint!(tableau_colval, estimated_nnz)
e = zeros(nrows) sizehint!(tableau_nzval, estimated_nnz)
e[i] = 1.0 e::Array{Float64} = zeros(nrows)
sol::Array{Float64} = zeros(nrows)
tableau_row::Array{Float64} = zeros(ncols)
end end
A = data.constr_lhs'
b = data.constr_ub
tableau_rowptr[1] = 1
@timeit "Process rows" begin
for k in eachindex(rows)
@timeit "Solve" begin @timeit "Solve" begin
sol = factor \ e fill!(e, 0.0)
e[rows[k]] = 1.0
ldiv!(sol, factor, e)
end end
@timeit "Multiply" begin @timeit "Compute row" begin
tableau_lhs[k, :] = sol' * data.constr_lhs mul!(tableau_row, A, sol)
tableau_rhs[k] = sol' * data.constr_ub tableau_rhs[k] = dot(sol, b)
end
needed_space = length(tableau_colval) + ncols
if needed_space > estimated_nnz
@timeit "Grow arrays" begin
estimated_nnz *= 2
sizehint!(tableau_colval, estimated_nnz)
sizehint!(tableau_nzval, estimated_nnz)
end end
end end
@timeit "Sparsify" begin @timeit "Collect nonzeros for row" begin
tableau_lhs[abs.(tableau_lhs) .<= tol] .= 0 for j in 1:ncols
tableau_lhs = sparse(tableau_lhs) val = tableau_row[j]
if abs(val) > tol
push!(tableau_colval, j)
push!(tableau_nzval, val)
end end
end end
end
tableau_rowptr[k + 1] = length(tableau_colval) + 1
end
end
@timeit "Shrink arrays" begin
sizehint!(tableau_colval, length(tableau_colval))
sizehint!(tableau_nzval, length(tableau_nzval))
end
@timeit "Build sparse matrix" begin
tableau_lhs_transposed = SparseMatrixCSC(ncols, num_rows, tableau_rowptr, tableau_colval, tableau_nzval)
tableau_lhs = transpose(tableau_lhs_transposed)
end
@timeit "Compute tableau objective row" begin @timeit "Compute tableau objective row" begin
sol = factor \ obj_b sol = factor \ obj_b
tableau_obj = -data.obj' + sol' * data.constr_lhs tableau_obj = -data.obj' + sol' * data.constr_lhs
tableau_obj[abs.(tableau_obj).<tol] .= 0 tableau_obj[abs.(tableau_obj).<tol] .= 0
tableau_obj = Array(tableau_obj')
end end
# Compute z if solution is provided # Compute z if solution is provided
@@ -113,4 +267,4 @@ function compute_tableau(
return Tableau(obj = tableau_obj, lhs = tableau_lhs, rhs = tableau_rhs, z = z) return Tableau(obj = tableau_obj, lhs = tableau_lhs, rhs = tableau_rhs, z = z)
end end
export get_basis, get_x, compute_tableau export get_basis, get_basis_gurobi, set_basis, get_x, compute_tableau
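The compute_tableau changes above build each tableau row by solving Bᵀy = eᵢ with a KLU factorization of the basis matrix and then forming yᵀA and yᵀb. Below is a minimal sketch of that idea under stated assumptions: A is the equality-form constraint matrix, b its right-hand side (constr_ub in the code), basic holds the indices of the basic columns, and the helper name tableau_row is ours, not part of the package.

using KLU, LinearAlgebra, SparseArrays

function tableau_row(A::SparseMatrixCSC, b::Vector{Float64}, basic::Vector{Int}, i::Int)
    B = A[:, basic]
    factor = klu(sparse(B'))      # factorize Bᵀ once; the real code reuses it for all requested rows
    e = zeros(size(A, 1))
    e[i] = 1.0
    y = factor \ e                # y = B⁻ᵀ eᵢ
    return (A' * y, dot(y, b))    # (tableau row over all columns, tableau rhs entry)
end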


@@ -96,46 +96,70 @@ Base.@kwdef mutable struct AddSlackVariables <: Transform
end end
function forward!(t::AddSlackVariables, data::ProblemData) function forward!(t::AddSlackVariables, data::ProblemData)
@timeit "Identify constraint type" begin
nrows, ncols = size(data.constr_lhs) nrows, ncols = size(data.constr_lhs)
isequality = abs.(data.constr_ub .- data.constr_lb) .< 1e-6 isequality = abs.(data.constr_ub .- data.constr_lb) .< 1e-6
eq = [i for i = 1:nrows if isequality[i]] eq = [i for i = 1:nrows if isequality[i]]
ge = [i for i = 1:nrows if isfinite(data.constr_lb[i]) && !isequality[i]] ge = [i for i = 1:nrows if isfinite(data.constr_lb[i]) && !isequality[i]]
le = [i for i = 1:nrows if isfinite(data.constr_ub[i]) && !isequality[i]] le = [i for i = 1:nrows if isfinite(data.constr_ub[i]) && !isequality[i]]
EQ, GE, LE = length(eq), length(ge), length(le) EQ, GE, LE = length(eq), length(ge), length(le)
end
@timeit "Identify slack type" begin
constr_lhs_t = sparse(data.constr_lhs')
function is_integral(row_idx, rhs)
rhs_is_integer = abs(rhs - round(rhs)) <= 1e-6
cols, coeffs = findnz(constr_lhs_t[:, row_idx])[1:2]
vars_are_integer = all(j -> data.var_types[j] ∈ ['I', 'B'], cols)
coeffs_are_integer = all(v -> abs(v - round(v)) <= 1e-6, coeffs)
return rhs_is_integer && vars_are_integer && coeffs_are_integer
end
slack_types = [
[is_integral(ge[i], data.constr_lb[ge[i]]) ? 'I' : 'C' for i = 1:GE];
[is_integral(le[i], data.constr_ub[le[i]]) ? 'I' : 'C' for i = 1:LE]
]
end
@timeit "Build M1" begin
t.M1 = [ t.M1 = [
I spzeros(ncols, GE + LE) I spzeros(ncols, GE + LE)
data.constr_lhs[ge, :] spzeros(GE, GE + LE) data.constr_lhs[ge, :] spzeros(GE, GE + LE)
-data.constr_lhs[le, :] spzeros(LE, GE + LE) -data.constr_lhs[le, :] spzeros(LE, GE + LE)
] ]
end
@timeit "Build M2" begin
t.M2 = [ t.M2 = [
zeros(ncols) zeros(ncols)
data.constr_lb[ge] data.constr_lb[ge]
-data.constr_ub[le] -data.constr_ub[le]
] ]
end
@timeit "Build t.lhs, t.rhs" begin
t.ncols_orig = ncols t.ncols_orig = ncols
t.GE, t.LE = GE, LE t.GE, t.LE = GE, LE
t.lhs_ge = data.constr_lhs[ge, :] t.lhs_ge = data.constr_lhs[ge, :]
t.lhs_le = data.constr_lhs[le, :] t.lhs_le = data.constr_lhs[le, :]
t.rhs_ge = data.constr_lb[ge] t.rhs_ge = data.constr_lb[ge]
t.rhs_le = data.constr_ub[le] t.rhs_le = data.constr_ub[le]
end
@timeit "Build data.constr_lhs" begin
data.constr_lhs = [ data.constr_lhs = [
data.constr_lhs[eq, :] spzeros(EQ, GE) spzeros(EQ, LE) data.constr_lhs[eq, :] spzeros(EQ, GE) spzeros(EQ, LE)
data.constr_lhs[ge, :] -I spzeros(GE, LE) data.constr_lhs[ge, :] -I spzeros(GE, LE)
data.constr_lhs[le, :] spzeros(LE, GE) I data.constr_lhs[le, :] spzeros(LE, GE) I
] ]
end
@timeit "Build other data fields" begin
data.obj = [data.obj; zeros(GE + LE)] data.obj = [data.obj; zeros(GE + LE)]
data.var_lb = [data.var_lb; zeros(GE + LE)] data.var_lb = [data.var_lb; zeros(GE + LE)]
data.var_ub = [data.var_ub; [Inf for _ = 1:(GE+LE)]] data.var_ub = [data.var_ub; [Inf for _ = 1:(GE+LE)]]
data.var_names = [data.var_names; ["__s$i" for i = 1:(GE+LE)]] data.var_names = [data.var_names; ["__s$i" for i = 1:(GE+LE)]]
data.var_types = [data.var_types; ['C' for _ = 1:(GE+LE)]] data.var_types = [data.var_types; slack_types]
data.constr_lb = [ data.constr_lb = [
data.constr_lb[eq] data.constr_lb[eq]
data.constr_lb[ge] data.constr_lb[ge]
data.constr_ub[le] data.constr_ub[le]
] ]
data.constr_ub = copy(data.constr_lb) data.constr_ub = copy(data.constr_lb)
end
end end
function backwards!(t::AddSlackVariables, c::ConstraintSet) function backwards!(t::AddSlackVariables, c::ConstraintSet)
@@ -155,71 +179,55 @@ end
# ----------------------------------------------------------------------------- # -----------------------------------------------------------------------------
Base.@kwdef mutable struct SplitFreeVars <: Transform Base.@kwdef mutable struct SplitFreeVars <: Transform
F::Int = 0 ncols::Int = 0
B::Int = 0 is_var_free::Vector{Bool} = []
free::Vector{Int} = []
others::Vector{Int} = []
end end
function forward!(t::SplitFreeVars, data::ProblemData) function forward!(t::SplitFreeVars, data::ProblemData)
lhs = data.constr_lhs lhs = data.constr_lhs
_, ncols = size(lhs) _, ncols = size(lhs)
free = [i for i = 1:ncols if !isfinite(data.var_lb[i]) && !isfinite(data.var_ub[i])] is_var_free = [!isfinite(data.var_lb[i]) && !isfinite(data.var_ub[i]) for i = 1:ncols]
others = [i for i = 1:ncols if isfinite(data.var_lb[i]) || isfinite(data.var_ub[i])] free_idx = findall(is_var_free)
t.F = length(free)
t.B = length(others)
t.free, t.others = free, others
data.obj = [ data.obj = [
data.obj[others] data.obj
data.obj[free] [-data.obj[i] for i in free_idx]
-data.obj[free]
] ]
data.constr_lhs = [lhs[:, others] lhs[:, free] -lhs[:, free]]
data.var_lb = [ data.var_lb = [
data.var_lb[others] [is_var_free[i] ? 0.0 : data.var_lb[i] for i in 1:ncols]
[0.0 for _ in free] [0 for _ in free_idx]
[0.0 for _ in free]
] ]
data.var_ub = [ data.var_ub = [
data.var_ub[others] [is_var_free[i] ? Inf : data.var_ub[i] for i in 1:ncols]
[Inf for _ in free] [Inf for _ in free_idx]
[Inf for _ in free]
] ]
data.var_types = [ data.var_types = [
data.var_types[others] data.var_types
data.var_types[free] [data.var_types[i] for i in free_idx]
data.var_types[free]
] ]
data.var_names = [ data.var_names = [
data.var_names[others] data.var_names
["$(v)_p" for v in data.var_names[free]] ["$(data.var_names[i])_neg" for i in free_idx]
["$(v)_m" for v in data.var_names[free]]
] ]
data.constr_lhs = [lhs -lhs[:, free_idx]]
t.is_var_free, t.ncols = is_var_free, ncols
end end
function backwards!(t::SplitFreeVars, c::ConstraintSet) function backwards!(t::SplitFreeVars, c::ConstraintSet)
# Convert GE constraints into LE ncols, is_var_free = t.ncols, t.is_var_free
nrows, _ = size(c.lhs) free_idx = findall(is_var_free)
ge = [i for i = 1:nrows if isfinite(c.lb[i])]
c.ub[ge], c.lb[ge] = -c.lb[ge], -c.ub[ge]
c.lhs[ge, :] *= -1
# Assert only LE constraints are left (EQ constraints are not supported) for (offset, var_idx) in enumerate(free_idx)
@assert all(c.lb .== -Inf) @assert c.lhs[:, var_idx] == -c.lhs[:, ncols+offset]
# Take minimum (weakest) coefficient
B, F = t.B, t.F
for i = 1:F
c.lhs[:, B+i] = min.(c.lhs[:, B+i], -c.lhs[:, B+F+i])
end end
c.lhs = c.lhs[:, 1:(B+F)] c.lhs = c.lhs[:, 1:ncols]
end end
function forward(t::SplitFreeVars, p::Vector{Float64})::Vector{Float64} function forward(t::SplitFreeVars, p::Vector{Float64})::Vector{Float64}
ncols, is_var_free = t.ncols, t.is_var_free
free_idx = findall(is_var_free)
return [ return [
p[t.others] [is_var_free[i] ? max(0, p[i]) : p[i] for i in 1:ncols]
max.(p[t.free], 0) [max(0, -p[i]) for i in free_idx]
max.(-p[t.free], 0)
] ]
end end
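The rewritten SplitFreeVars transform above keeps the original column of each free variable as its positive part and appends a negated copy of that column as its negative part (x = x⁺ - x⁻ with x⁺, x⁻ ≥ 0), rather than reordering columns as the old version did. A toy sketch of the column operation, with a made-up matrix and index set for illustration:

using SparseArrays

lhs = sparse([1.0 2.0; 3.0 -1.0])    # toy constraint matrix; pretend column 2 is a free variable
free_idx = [2]
lhs_split = [lhs -lhs[:, free_idx]]  # columns now correspond to x1, x2⁺, x2⁻, as in forward! above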


@@ -13,6 +13,7 @@ include("collectors.jl")
 include("components.jl")
 include("extractors.jl")
 include("io.jl")
+include("problems/maxcut.jl")
 include("problems/setcover.jl")
 include("problems/stab.jl")
 include("problems/tsp.jl")
@@ -24,6 +25,7 @@ function __init__()
     __init_components__()
     __init_extractors__()
     __init_io__()
+    __init_problems_maxcut__()
     __init_problems_setcover__()
     __init_problems_stab__()
     __init_problems_tsp__()
@@ -37,48 +39,48 @@ include("Cuts/Cuts.jl")
# Precompilation # Precompilation
# ============================================================================= # =============================================================================
function __precompile_cuts__() # function __precompile_cuts__()
function build_model(mps_filename) # function build_model(mps_filename)
model = read_from_file(mps_filename) # model = read_from_file(mps_filename)
set_optimizer(model, SCIP.Optimizer) # set_optimizer(model, SCIP.Optimizer)
return JumpModel(model) # return JumpModel(model)
end # end
BASEDIR = dirname(@__FILE__) # BASEDIR = dirname(@__FILE__)
mps_filename = "$BASEDIR/../test/fixtures/bell5.mps.gz" # mps_filename = "$BASEDIR/../test/fixtures/bell5.mps.gz"
h5_filename = "$BASEDIR/../test/fixtures/bell5.h5" # h5_filename = "$BASEDIR/../test/fixtures/bell5.h5"
collect_gmi_dual( # collect_gmi_dual(
mps_filename; # mps_filename;
optimizer=HiGHS.Optimizer, # optimizer=HiGHS.Optimizer,
max_rounds = 10, # max_rounds = 10,
max_cuts_per_round = 500, # max_cuts_per_round = 500,
) # )
knn = KnnDualGmiComponent( # knn = KnnDualGmiComponent(
extractor = H5FieldsExtractor(instance_fields = ["static_var_obj_coeffs"]), # extractor = H5FieldsExtractor(instance_fields = ["static_var_obj_coeffs"]),
k = 2, # k = 2,
) # )
knn.fit([h5_filename, h5_filename]) # knn.fit([h5_filename, h5_filename])
solver = LearningSolver( # solver = LearningSolver(
components = [ # components = [
ExpertPrimalComponent(action = SetWarmStart()), # ExpertPrimalComponent(action = SetWarmStart()),
knn, # knn,
], # ],
skip_lp = true, # skip_lp = true,
) # )
solver.optimize(mps_filename, build_model) # solver.optimize(mps_filename, build_model)
end # end
@setup_workload begin # @setup_workload begin
using SCIP # using SCIP
using HiGHS # using HiGHS
using MIPLearn.Cuts # using MIPLearn.Cuts
using PrecompileTools: @setup_workload, @compile_workload # using PrecompileTools: @setup_workload, @compile_workload
__init__() # __init__()
Cuts.__init__() # Cuts.__init__()
@compile_workload begin # @compile_workload begin
__precompile_cuts__() # __precompile_cuts__()
end # end
end # end
end # module end # module

src/problems/maxcut.jl (new file)

@@ -0,0 +1,31 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2025, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using JuMP
global MaxCutData = PyNULL()
global MaxCutGenerator = PyNULL()
function __init_problems_maxcut__()
copy!(MaxCutData, pyimport("miplearn.problems.maxcut").MaxCutData)
copy!(MaxCutGenerator, pyimport("miplearn.problems.maxcut").MaxCutGenerator)
end
function build_maxcut_model_jump(data::Any; optimizer)
if data isa String
data = read_pkl_gz(data)
end
nodes = collect(data.graph.nodes())
edges = collect(data.graph.edges())
model = Model(optimizer)
@variable(model, x[nodes], Bin)
@objective(
model,
Min,
sum(-data.weights[i] * x[e[1]] * (1 - x[e[2]]) for (i, e) in enumerate(edges))
)
return JumpModel(model)
end
export MaxCutData, MaxCutGenerator, build_maxcut_model_jump


@@ -69,14 +69,13 @@ function submit(model::JuMP.Model, constr)
end end
function _extract_after_load(model::JuMP.Model, h5) function _extract_after_load(model::JuMP.Model, h5)
@info "_extract_after_load"
if JuMP.objective_sense(model) == MOI.MIN_SENSE if JuMP.objective_sense(model) == MOI.MIN_SENSE
h5.put_scalar("static_sense", "min") h5.put_scalar("static_sense", "min")
else else
h5.put_scalar("static_sense", "max") h5.put_scalar("static_sense", "max")
end end
@time _extract_after_load_vars(model, h5) _extract_after_load_vars(model, h5)
@time _extract_after_load_constrs(model, h5) _extract_after_load_constrs(model, h5)
end end
function _extract_after_load_vars(model::JuMP.Model, h5) function _extract_after_load_vars(model::JuMP.Model, h5)
@@ -90,14 +89,27 @@ function _extract_after_load_vars(model::JuMP.Model, h5)
for v in vars for v in vars
] ]
types = [JuMP.is_binary(v) ? "B" : JuMP.is_integer(v) ? "I" : "C" for v in vars] types = [JuMP.is_binary(v) ? "B" : JuMP.is_integer(v) ? "I" : "C" for v in vars]
-obj = objective_function(model, AffExpr)
-obj_coeffs = [v ∈ keys(obj.terms) ? obj.terms[v] : 0.0 for v in vars]
+obj = objective_function(model, QuadExpr)
+# Linear obj terms
+obj_coeffs_linear = [v ∈ keys(obj.aff.terms) ? obj.aff.terms[v] : 0.0 for v in vars]
# Quadratic obj terms
if length(obj.terms) > 0
nvars = length(vars)
obj_coeffs_quad = zeros(nvars, nvars)
for (pair, coeff) in obj.terms
obj_coeffs_quad[pair.a.index.value, pair.b.index.value] = coeff
end
h5.put_array("static_var_obj_coeffs_quad", obj_coeffs_quad)
end
h5.put_array("static_var_names", to_str_array(JuMP.name.(vars))) h5.put_array("static_var_names", to_str_array(JuMP.name.(vars)))
h5.put_array("static_var_types", to_str_array(types)) h5.put_array("static_var_types", to_str_array(types))
h5.put_array("static_var_lower_bounds", lb) h5.put_array("static_var_lower_bounds", lb)
h5.put_array("static_var_upper_bounds", ub) h5.put_array("static_var_upper_bounds", ub)
h5.put_array("static_var_obj_coeffs", obj_coeffs) h5.put_array("static_var_obj_coeffs", obj_coeffs_linear)
h5.put_scalar("static_obj_offset", obj.constant) h5.put_scalar("static_obj_offset", obj.aff.constant)
end end
function _extract_after_load_constrs(model::JuMP.Model, h5) function _extract_after_load_constrs(model::JuMP.Model, h5)
@@ -144,7 +156,7 @@ function _extract_after_load_constrs(model::JuMP.Model, h5)
end end
end end
if isempty(names) if isempty(names)
error("no model constraints found; note that MIPLearn ignores unnamed constraints") return
end end
lhs = sparse(lhs_rows, lhs_cols, lhs_values, length(rhs), JuMP.num_variables(model)) lhs = sparse(lhs_rows, lhs_cols, lhs_values, length(rhs), JuMP.num_variables(model))
h5.put_sparse("static_constr_lhs", lhs) h5.put_sparse("static_constr_lhs", lhs)
@@ -154,11 +166,10 @@ function _extract_after_load_constrs(model::JuMP.Model, h5)
end end
function _extract_after_lp(model::JuMP.Model, h5) function _extract_after_lp(model::JuMP.Model, h5)
@info "_extract_after_lp"
h5.put_scalar("lp_wallclock_time", solve_time(model)) h5.put_scalar("lp_wallclock_time", solve_time(model))
h5.put_scalar("lp_obj_value", objective_value(model)) h5.put_scalar("lp_obj_value", objective_value(model))
@time _extract_after_lp_vars(model, h5) _extract_after_lp_vars(model, h5)
@time _extract_after_lp_constrs(model, h5) _extract_after_lp_constrs(model, h5)
end end
function _extract_after_lp_vars(model::JuMP.Model, h5) function _extract_after_lp_vars(model::JuMP.Model, h5)
@@ -184,46 +195,46 @@ function _extract_after_lp_vars(model::JuMP.Model, h5)
end end
h5.put_array("lp_var_basis_status", to_str_array(basis_status)) h5.put_array("lp_var_basis_status", to_str_array(basis_status))
# # Sensitivity analysis # Sensitivity analysis
# obj_coeffs = h5.get_array("static_var_obj_coeffs") obj_coeffs = h5.get_array("static_var_obj_coeffs")
# sensitivity_report = lp_sensitivity_report(model) sensitivity_report = lp_sensitivity_report(model)
# sa_obj_down, sa_obj_up = Float64[], Float64[] sa_obj_down, sa_obj_up = Float64[], Float64[]
# sa_lb_down, sa_lb_up = Float64[], Float64[] sa_lb_down, sa_lb_up = Float64[], Float64[]
# sa_ub_down, sa_ub_up = Float64[], Float64[] sa_ub_down, sa_ub_up = Float64[], Float64[]
# for (i, v) in enumerate(vars) for (i, v) in enumerate(vars)
# # Objective function # Objective function
# (delta_down, delta_up) = sensitivity_report[v] (delta_down, delta_up) = sensitivity_report[v]
# push!(sa_obj_down, delta_down + obj_coeffs[i]) push!(sa_obj_down, delta_down + obj_coeffs[i])
# push!(sa_obj_up, delta_up + obj_coeffs[i]) push!(sa_obj_up, delta_up + obj_coeffs[i])
# # Lower bound # Lower bound
# if has_lower_bound(v) if has_lower_bound(v)
# constr = LowerBoundRef(v) constr = LowerBoundRef(v)
# (delta_down, delta_up) = sensitivity_report[constr] (delta_down, delta_up) = sensitivity_report[constr]
# push!(sa_lb_down, lower_bound(v) + delta_down) push!(sa_lb_down, lower_bound(v) + delta_down)
# push!(sa_lb_up, lower_bound(v) + delta_up) push!(sa_lb_up, lower_bound(v) + delta_up)
# else else
# push!(sa_lb_down, -Inf) push!(sa_lb_down, -Inf)
# push!(sa_lb_up, -Inf) push!(sa_lb_up, -Inf)
# end end
# # Upper bound # Upper bound
# if has_upper_bound(v) if has_upper_bound(v)
# constr = JuMP.UpperBoundRef(v) constr = JuMP.UpperBoundRef(v)
# (delta_down, delta_up) = sensitivity_report[constr] (delta_down, delta_up) = sensitivity_report[constr]
# push!(sa_ub_down, upper_bound(v) + delta_down) push!(sa_ub_down, upper_bound(v) + delta_down)
# push!(sa_ub_up, upper_bound(v) + delta_up) push!(sa_ub_up, upper_bound(v) + delta_up)
# else else
# push!(sa_ub_down, Inf) push!(sa_ub_down, Inf)
# push!(sa_ub_up, Inf) push!(sa_ub_up, Inf)
# end end
# end end
# h5.put_array("lp_var_sa_obj_up", sa_obj_up) h5.put_array("lp_var_sa_obj_up", sa_obj_up)
# h5.put_array("lp_var_sa_obj_down", sa_obj_down) h5.put_array("lp_var_sa_obj_down", sa_obj_down)
# h5.put_array("lp_var_sa_ub_up", sa_ub_up) h5.put_array("lp_var_sa_ub_up", sa_ub_up)
# h5.put_array("lp_var_sa_ub_down", sa_ub_down) h5.put_array("lp_var_sa_ub_down", sa_ub_down)
# h5.put_array("lp_var_sa_lb_up", sa_lb_up) h5.put_array("lp_var_sa_lb_up", sa_lb_up)
# h5.put_array("lp_var_sa_lb_down", sa_lb_down) h5.put_array("lp_var_sa_lb_down", sa_lb_down)
end end
@@ -239,7 +250,7 @@ function _extract_after_lp_constrs(model::JuMP.Model, h5)
duals = Float64[] duals = Float64[]
basis_status = [] basis_status = []
constr_idx = 1 constr_idx = 1
# sensitivity_report = lp_sensitivity_report(model) sensitivity_report = lp_sensitivity_report(model)
for (ftype, stype) in JuMP.list_of_constraint_types(model) for (ftype, stype) in JuMP.list_of_constraint_types(model)
for constr in JuMP.all_constraints(model, ftype, stype) for constr in JuMP.all_constraints(model, ftype, stype)
length(JuMP.name(constr)) > 0 || continue length(JuMP.name(constr)) > 0 || continue
@@ -257,22 +268,21 @@ function _extract_after_lp_constrs(model::JuMP.Model, h5)
error("Unknown basis status: $b") error("Unknown basis status: $b")
end end
# # Sensitivity analysis # Sensitivity analysis
# (delta_down, delta_up) = sensitivity_report[constr] (delta_down, delta_up) = sensitivity_report[constr]
# push!(sa_rhs_down, rhs[constr_idx] + delta_down) push!(sa_rhs_down, rhs[constr_idx] + delta_down)
# push!(sa_rhs_up, rhs[constr_idx] + delta_up) push!(sa_rhs_up, rhs[constr_idx] + delta_up)
constr_idx += 1 constr_idx += 1
end end
end end
h5.put_array("lp_constr_dual_values", duals) h5.put_array("lp_constr_dual_values", duals)
h5.put_array("lp_constr_basis_status", to_str_array(basis_status)) h5.put_array("lp_constr_basis_status", to_str_array(basis_status))
# h5.put_array("lp_constr_sa_rhs_up", sa_rhs_up) h5.put_array("lp_constr_sa_rhs_up", sa_rhs_up)
# h5.put_array("lp_constr_sa_rhs_down", sa_rhs_down) h5.put_array("lp_constr_sa_rhs_down", sa_rhs_down)
end end
function _extract_after_mip(model::JuMP.Model, h5) function _extract_after_mip(model::JuMP.Model, h5)
@info "_extract_after_mip"
h5.put_scalar("mip_obj_value", objective_value(model)) h5.put_scalar("mip_obj_value", objective_value(model))
h5.put_scalar("mip_obj_bound", objective_bound(model)) h5.put_scalar("mip_obj_bound", objective_bound(model))
h5.put_scalar("mip_wallclock_time", solve_time(model)) h5.put_scalar("mip_wallclock_time", solve_time(model))
@@ -285,9 +295,11 @@ function _extract_after_mip(model::JuMP.Model, h5)
# Slacks # Slacks
lhs = h5.get_sparse("static_constr_lhs") lhs = h5.get_sparse("static_constr_lhs")
if lhs !== nothing
rhs = h5.get_array("static_constr_rhs") rhs = h5.get_array("static_constr_rhs")
slacks = abs.(lhs * x - rhs) slacks = abs.(lhs * x - rhs)
h5.put_array("mip_constr_slacks", slacks) h5.put_array("mip_constr_slacks", slacks)
end
# Cuts and lazy constraints # Cuts and lazy constraints
ext = model.ext[:miplearn] ext = model.ext[:miplearn]
@@ -298,9 +310,7 @@ end
function _fix_variables(model::JuMP.Model, var_names, var_values, stats) function _fix_variables(model::JuMP.Model, var_names, var_values, stats)
vars = [variable_by_name(model, v) for v in var_names] vars = [variable_by_name(model, v) for v in var_names]
for (i, var) in enumerate(vars) for (i, var) in enumerate(vars)
if isfinite(var_values[i]) fix(var, var_values[i], force = true)
fix(var, var_values[i], force=true)
end
end end
end end
@@ -417,7 +427,7 @@ function __init_solvers_jump__()
constrs_lhs, constrs_lhs,
constrs_sense, constrs_sense,
constrs_rhs, constrs_rhs,
stats=nothing, stats = nothing,
) = _add_constrs( ) = _add_constrs(
self.inner, self.inner,
from_str_array(var_names), from_str_array(var_names),
@@ -433,14 +443,14 @@ function __init_solvers_jump__()
extract_after_mip(self, h5) = _extract_after_mip(self.inner, h5) extract_after_mip(self, h5) = _extract_after_mip(self.inner, h5)
fix_variables(self, var_names, var_values, stats=nothing) = fix_variables(self, var_names, var_values, stats = nothing) =
_fix_variables(self.inner, from_str_array(var_names), var_values, stats) _fix_variables(self.inner, from_str_array(var_names), var_values, stats)
optimize(self) = _optimize(self.inner) optimize(self) = _optimize(self.inner)
relax(self) = Class(_relax(self.inner)) relax(self) = Class(_relax(self.inner))
set_warm_starts(self, var_names, var_values, stats=nothing) = set_warm_starts(self, var_names, var_values, stats = nothing) =
_set_warm_starts(self.inner, from_str_array(var_names), var_values, stats) _set_warm_starts(self.inner, from_str_array(var_names), var_values, stats)
write(self, filename) = _write(self.inner, filename) write(self, filename) = _write(self.inner, filename)
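The extractor above now reads the objective as a QuadExpr so that MIQPs such as the new max cut model are supported: linear terms come from obj.aff.terms and quadratic terms are written into a dense matrix indexed by variable indices (stored as static_var_obj_coeffs_quad). A small sketch of that mapping on a toy model, not taken from the repository:

using JuMP

model = Model()
@variable(model, x[1:2])
@objective(model, Min, 3x[1] + x[1] * x[2])

obj = objective_function(model, QuadExpr)
vars = all_variables(model)
obj_coeffs_linear = [v ∈ keys(obj.aff.terms) ? obj.aff.terms[v] : 0.0 for v in vars]
obj_coeffs_quad = zeros(length(vars), length(vars))
for (pair, coeff) in obj.terms
    obj_coeffs_quad[pair.a.index.value, pair.b.index.value] = coeff
end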


@@ -4,9 +4,7 @@ authors = ["Alinson S. Xavier <git@axavier.org>"]
 version = "0.1.0"

 [deps]
-CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
 Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d"
-DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
 Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
 HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"

12 binary files not shown.

File diff suppressed because it is too large.

@@ -10,12 +10,9 @@ using Test
using MIPLearn.BB using MIPLearn.BB
using MIPLearn using MIPLearn
using CSV
using DataFrames
basepath = @__DIR__ basepath = @__DIR__
function bb_run(optimizer_name, optimizer; large=true) function bb_run(optimizer_name, optimizer; large = true)
@testset "Solve ($optimizer_name)" begin @testset "Solve ($optimizer_name)" begin
@testset "interface" begin @testset "interface" begin
filename = "$FIXTURES/danoint.mps.gz" filename = "$FIXTURES/danoint.mps.gz"
@@ -28,7 +25,7 @@ function bb_run(optimizer_name, optimizer; large=true)
status, obj = BB.solve_relaxation!(mip) status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal @test status == :Optimal
@test round(obj, digits=6) == 62.637280 @test round(obj, digits = 6) == 62.637280
@test BB.name(mip, mip.int_vars[1]) == "xab" @test BB.name(mip, mip.int_vars[1]) == "xab"
@test BB.name(mip, mip.int_vars[2]) == "xac" @test BB.name(mip, mip.int_vars[2]) == "xac"
@@ -38,26 +35,26 @@ function bb_run(optimizer_name, optimizer; large=true)
@test mip.int_vars_ub[1] == 1.0 @test mip.int_vars_ub[1] == 1.0
vals = BB.values(mip, mip.int_vars) vals = BB.values(mip, mip.int_vars)
@test round(vals[1], digits=6) == 0.046933 @test round(vals[1], digits = 6) == 0.046933
@test round(vals[2], digits=6) == 0.000841 @test round(vals[2], digits = 6) == 0.000841
@test round(vals[3], digits=6) == 0.248696 @test round(vals[3], digits = 6) == 0.248696
# Probe (up and down are feasible) # Probe (up and down are feasible)
probe_up, probe_down = BB.probe(mip, mip.int_vars[1], 0.5, 0.0, 1.0, 1_000_000) probe_up, probe_down = BB.probe(mip, mip.int_vars[1], 0.5, 0.0, 1.0, 1_000_000)
@test round(probe_down, digits=6) == 62.690000 @test round(probe_down, digits = 6) == 62.690000
@test round(probe_up, digits=6) == 62.714100 @test round(probe_up, digits = 6) == 62.714100
# Fix one variable to zero # Fix one variable to zero
BB.set_bounds!(mip, mip.int_vars[1:1], [0.0], [0.0]) BB.set_bounds!(mip, mip.int_vars[1:1], [0.0], [0.0])
status, obj = BB.solve_relaxation!(mip) status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal @test status == :Optimal
@test round(obj, digits=6) == 62.690000 @test round(obj, digits = 6) == 62.690000
# Fix one variable to one and another variable variable to zero # Fix one variable to one and another variable variable to zero
BB.set_bounds!(mip, mip.int_vars[1:2], [1.0, 0.0], [1.0, 0.0]) BB.set_bounds!(mip, mip.int_vars[1:2], [1.0, 0.0], [1.0, 0.0])
status, obj = BB.solve_relaxation!(mip) status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal @test status == :Optimal
@test round(obj, digits=6) == 62.714777 @test round(obj, digits = 6) == 62.714777
# Fix all binary variables to one, making problem infeasible # Fix all binary variables to one, making problem infeasible
N = length(mip.int_vars) N = length(mip.int_vars)
@@ -71,7 +68,7 @@ function bb_run(optimizer_name, optimizer; large=true)
BB.set_bounds!(mip, mip.int_vars, zeros(N), ones(N)) BB.set_bounds!(mip, mip.int_vars, zeros(N), ones(N))
status, obj = BB.solve_relaxation!(mip) status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal @test status == :Optimal
@test round(obj, digits=6) == 62.637280 @test round(obj, digits = 6) == 62.637280
end end
@testset "varbranch" begin @testset "varbranch" begin
@@ -85,8 +82,8 @@ function bb_run(optimizer_name, optimizer; large=true)
BB.StrongBranching(), BB.StrongBranching(),
BB.ReliabilityBranching(), BB.ReliabilityBranching(),
BB.HybridBranching(), BB.HybridBranching(),
BB.StrongBranching(aggregation=:min), BB.StrongBranching(aggregation = :min),
BB.ReliabilityBranching(aggregation=:min, collect=true), BB.ReliabilityBranching(aggregation = :min, collect = true),
] ]
h5 = H5File("$FIXTURES/$instance.h5") h5 = H5File("$FIXTURES/$instance.h5")
mip_obj_bound = h5.get_scalar("mip_obj_bound") mip_obj_bound = h5.get_scalar("mip_obj_bound")
@@ -107,13 +104,13 @@ function bb_run(optimizer_name, optimizer; large=true)
end end
@testset "collect" begin @testset "collect" begin
rule = BB.ReliabilityBranching(collect=true) rule = BB.ReliabilityBranching(collect = true)
BB.collect!( BB.collect!(
optimizer, optimizer,
"$FIXTURES/bell5.mps.gz", "$FIXTURES/bell5.mps.gz",
node_limit=100, node_limit = 100,
print_interval=10, print_interval = 10,
branch_rule=rule, branch_rule = rule,
) )
n_sb = rule.stats.num_strong_branch_calls n_sb = rule.stats.num_strong_branch_calls
h5 = H5File("$FIXTURES/bell5.h5") h5 = H5File("$FIXTURES/bell5.h5")
@@ -131,67 +128,3 @@ function test_bb()
@time bb_run("HiGHS", optimizer_with_attributes(HiGHS.Optimizer)) @time bb_run("HiGHS", optimizer_with_attributes(HiGHS.Optimizer))
# @time bb_run("CPLEX", optimizer_with_attributes(CPLEX.Optimizer, "CPXPARAM_Threads" => 1)) # @time bb_run("CPLEX", optimizer_with_attributes(CPLEX.Optimizer, "CPXPARAM_Threads" => 1))
end end
function test_bb_replay()
rule_sb = BB.StrongBranching()
rule_rb = BB.ReliabilityBranching()
optimizer = optimizer_with_attributes(HiGHS.Optimizer)
filenames = [replace(f, ".h5" => "") for f in glob("test/fixtures/stab/*.h5")]
results_filename = "tmp.csv"
lk = ReentrantLock()
results = []
function push_result(r)
lock(lk) do
push!(results, r)
df = DataFrame()
for row in results
push!(df, row, cols=:union)
end
CSV.write(results_filename, df)
end
end
function solve(filename; replay=nothing, skip=false, rule)
has_replay = (replay !== nothing)
h5 = H5File("$filename.h5", "r")
mip_obj_bound = h5.get_scalar("mip_obj_bound")
@show filename
@show has_replay
h5.file.close()
mip = BB.init(optimizer)
BB.read!(mip, "$filename.mps.gz")
time_solve = @elapsed begin
pool, replay = BB.solve!(
mip,
initial_primal_bound=mip_obj_bound,
print_interval=100,
node_limit=1_000,
branch_rule=rule,
replay=replay,
)
end
if !skip
push_result(
Dict(
"Filename" => filename,
"Replay?" => has_replay,
"Solve time (s)" => time_solve,
"Relative MIP gap (%)" => round(pool.gap * 100, digits=3)
)
)
end
return replay
end
# Solve reference instance
replay = solve(filenames[1], skip=true, rule=rule_sb)
# Solve perturbations
for i in 2:6
solve(filenames[i], rule=rule_rb, replay=nothing)
solve(filenames[i], rule=rule_rb, replay=deepcopy(replay))
end
return
end


@@ -24,6 +24,7 @@ include("Cuts/tableau/test_gmi_dual.jl")
 include("problems/test_setcover.jl")
 include("problems/test_stab.jl")
 include("problems/test_tsp.jl")
+include("problems/test_maxcut.jl")
 include("solvers/test_jump.jl")
 include("test_io.jl")
 include("test_usage.jl")
@@ -37,6 +38,7 @@ function runtests()
     test_problems_setcover()
     test_problems_stab()
     test_problems_tsp()
+    test_problems_maxcut()
     test_solvers_jump()
     test_usage()
     test_cuts()


@@ -0,0 +1,54 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2025, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using PyCall
function test_problems_maxcut()
np = pyimport("numpy")
random = pyimport("random")
scipy_stats = pyimport("scipy.stats")
randint = scipy_stats.randint
uniform = scipy_stats.uniform
# Set random seed
random.seed(42)
np.random.seed(42)
# Build random instance
data = MaxCutGenerator(
n = randint(low = 10, high = 11),
p = uniform(loc = 0.5, scale = 0.0),
fix_graph = false,
).generate(
1,
)[1]
# Build model
model = build_maxcut_model_jump(data, optimizer = SCIP.Optimizer)
# Check static features
h5 = H5File(tempname(), "w")
model.extract_after_load(h5)
obj_linear = h5.get_array("static_var_obj_coeffs")
obj_quad = h5.get_array("static_var_obj_coeffs_quad")
@test obj_linear == [3.0, 1.0, 3.0, 1.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0]
@test obj_quad == [
0.0 0.0 -1.0 1.0 -1.0 0.0 0.0 0.0 -1.0 -1.0
0.0 0.0 1.0 -1.0 0.0 -1.0 -1.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 -1.0 -1.0
0.0 0.0 0.0 0.0 0.0 -1.0 1.0 -1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 -1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
]
# Check optimal solution
model.optimize()
model.extract_after_mip(h5)
@test h5.get_scalar("mip_obj_value") == -4
h5.close()
end