2 Commits

SHA1        Message                                  Date
cbedb02a9f  Merge branch 'dev' into feature/replay   2024-11-21 09:40:06 -06:00
20d6570ea6  Replay                                   2024-01-11 11:26:45 -06:00

33 changed files with 885 additions and 1513 deletions


@@ -1,12 +1,11 @@
name = "MIPLearn"
uuid = "2b1277c3-b477-4c49-a15e-7ba350325c68"
authors = ["Alinson S Xavier <git@axavier.org>"]
version = "0.4.2"
version = "0.4.0"
[deps]
Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b"
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
@@ -29,7 +28,6 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
[compat]
Conda = "1"
DataStructures = "0.18"
Gurobi = "1.7.5"
HDF5 = "0.16"
HiGHS = "1"
JLD2 = "0.4"
@@ -38,10 +36,8 @@ JuMP = "1"
KLU = "0.4"
MathOptInterface = "1"
OrderedCollections = "1"
PrecompileTools = "1"
PyCall = "1"
Requires = "1"
SCIP = "0.12"
Statistics = "1"
TimerOutputs = "0.5"
julia = "1"

deps/build.jl (vendored, 2 changed lines)

@@ -5,7 +5,7 @@ function install_miplearn()
Conda.update()
pip = joinpath(dirname(pyimport("sys").executable), "pip")
isfile(pip) || error("$pip: invalid path")
run(`$pip install miplearn==0.4.4`)
run(`$pip install miplearn==0.4.0`)
end
install_miplearn()


@@ -6,7 +6,7 @@ using Printf
function print_progress_header()
@printf(
"%8s %9s %9s %13s %13s %9s %9s %13s %9s %-24s %9s %9s %6s %6s",
"%8s %9s %9s %13s %13s %9s %6s %13s %6s %-24s %9s %9s %6s %6s",
"time",
"processed",
"pending",
@@ -31,9 +31,9 @@ function print_progress(
node::Node;
time_elapsed::Float64,
print_interval::Int,
primal_update::Bool,
primal_update::Bool
)::Nothing
if (pool.processed % print_interval == 0) || isempty(pool.pending) || primal_update
if (pool.processed % print_interval == 0) || isempty(pool.pending)
if isempty(node.branch_vars)
branch_var_name = "---"
branch_lb = "---"
@@ -46,7 +46,7 @@ function print_progress(
branch_ub = @sprintf("%9.2f", last(node.branch_ub))
end
@printf(
"%8.2f %9d %9d %13.6e %13.6e %9.2e %9d %13.6e %9s %-24s %9s %9s %6d %6d",
"%8.2f %9d %9d %13.6e %13.6e %9.2e %6d %13.6e %6s %-24s %9s %9s %6d %6d",
time_elapsed,
pool.processed,
length(pool.processing) + length(pool.pending),


@@ -134,11 +134,7 @@ function _get_int_variables(
var_ub = constr.upper
MOI.delete(optimizer, _upper_bound_index(var))
end
MOI.add_constraint(
optimizer,
MOI.VariableIndex(var.index),
MOI.Interval(var_lb, var_ub),
)
MOI.add_constraint(optimizer, var, MOI.Interval(var_lb, var_ub))
end
push!(vars, var)
push!(lb, var_lb)


@@ -8,10 +8,10 @@ import Base.Threads: threadid
function take(
pool::NodePool;
suggestions::Array{Node} = [],
suggestions::Array{Node}=[],
time_remaining::Float64,
gap_limit::Float64,
node_limit::Int,
node_limit::Int
)::Union{Symbol,Node}
t = threadid()
lock(pool.lock) do
@@ -53,8 +53,8 @@ function offer(
pool::NodePool;
parent_node::Union{Nothing,Node},
child_nodes::Vector{Node},
time_elapsed::Float64 = 0.0,
print_interval::Int = 100,
time_elapsed::Float64=0.0,
print_interval::Int=100
)::Nothing
lock(pool.lock) do
primal_update = false
@@ -101,30 +101,32 @@ function offer(
# Update branching variable history
branch_var = child_nodes[1].branch_vars[end]
offset = findfirst(isequal(branch_var), parent_node.fractional_variables)
x = parent_node.fractional_values[offset]
obj_change_up = child_nodes[1].obj - parent_node.obj
obj_change_down = child_nodes[2].obj - parent_node.obj
_update_var_history(
pool = pool,
var = branch_var,
x = x,
obj_change_down = obj_change_down,
obj_change_up = obj_change_up,
)
# Update global history
pool.history.avg_pseudocost_up =
mean(vh.pseudocost_up for vh in values(pool.var_history))
pool.history.avg_pseudocost_down =
mean(vh.pseudocost_down for vh in values(pool.var_history))
if offset !== nothing
x = parent_node.fractional_values[offset]
obj_change_up = child_nodes[1].obj - parent_node.obj
obj_change_down = child_nodes[2].obj - parent_node.obj
_update_var_history(
pool=pool,
var=branch_var,
x=x,
obj_change_down=obj_change_down,
obj_change_up=obj_change_up,
)
# Update global history
pool.history.avg_pseudocost_up =
mean(vh.pseudocost_up for vh in values(pool.var_history))
pool.history.avg_pseudocost_down =
mean(vh.pseudocost_down for vh in values(pool.var_history))
end
end
for node in child_nodes
print_progress(
pool,
node,
time_elapsed = time_elapsed,
print_interval = print_interval,
primal_update = isfinite(node.obj) && isempty(node.fractional_variables),
time_elapsed=time_elapsed,
print_interval=print_interval,
primal_update=isfinite(node.obj) && isempty(node.fractional_variables),
)
end
end
@@ -136,7 +138,7 @@ function _update_var_history(;
var::Variable,
x::Float64,
obj_change_down::Float64,
obj_change_up::Float64,
obj_change_up::Float64
)::Nothing
# Create new history entry
if var ∉ keys(pool.var_history)


@@ -10,16 +10,22 @@ import ..H5File
function solve!(
mip::MIP;
time_limit::Float64 = Inf,
node_limit::Int = typemax(Int),
gap_limit::Float64 = 1e-4,
print_interval::Int = 5,
initial_primal_bound::Float64 = Inf,
branch_rule::VariableBranchingRule = ReliabilityBranching(),
enable_plunging = true,
)::NodePool
time_limit::Float64=Inf,
node_limit::Int=typemax(Int),
gap_limit::Float64=1e-4,
print_interval::Int=5,
initial_primal_bound::Float64=Inf,
branch_rule::VariableBranchingRule=ReliabilityBranching(),
enable_plunging=true,
replay=nothing
)::Tuple{NodePool,ReplayInfo}
if replay === nothing
replay = ReplayInfo()
end
time_initial = time()
pool = NodePool(mip = mip)
pool = NodePool(mip=mip, next_index=replay.next_index)
pool.primal_bound = initial_primal_bound
root_node = _create_node(mip)
@@ -34,9 +40,9 @@ function solve!(
offer(
pool,
parent_node = nothing,
child_nodes = [root_node],
print_interval = print_interval,
parent_node=nothing,
child_nodes=[root_node],
print_interval=print_interval,
)
@threads for t = 1:nthreads()
child_one, child_zero, suggestions = nothing, nothing, Node[]
@@ -47,10 +53,10 @@ function solve!(
end
node = take(
pool,
suggestions = suggestions,
time_remaining = time_limit - time_elapsed,
node_limit = node_limit,
gap_limit = gap_limit,
suggestions=suggestions,
time_remaining=time_limit - time_elapsed,
node_limit=node_limit,
gap_limit=gap_limit,
)
if node == :END
break
@@ -64,9 +70,24 @@ function solve!(
@assert status == :Optimal
_unset_node_bounds(node)
# Find branching variable
ids = generate_indices(pool, 2)
branch_var = find_branching_var(branch_rule, node, pool)
if node.index in keys(replay.node_decisions)
decision = replay.node_decisions[node.index]
ids = decision.ids
branch_var = decision.branch_var
var_value = decision.var_value
else
# Find branching variable
ids = generate_indices(pool, 2)
branch_var = find_branching_var(branch_rule, node, pool)
# Query current fractional value
offset = findfirst(isequal(branch_var), node.fractional_variables)
var_value = node.fractional_values[offset]
# Update replay
decision = ReplayNodeDecision(; branch_var, var_value, ids)
replay.node_decisions[node.index] = decision
end
# Find current variable lower and upper bounds
offset = findfirst(isequal(branch_var), mip.int_vars)
@@ -79,46 +100,43 @@ function solve!(
end
end
# Query current fractional value
offset = findfirst(isequal(branch_var), node.fractional_variables)
var_value = node.fractional_values[offset]
child_zero = _create_node(
mip,
index = ids[2],
parent = node,
branch_var = branch_var,
branch_var_lb = var_lb,
branch_var_ub = floor(var_value),
index=ids[2],
parent=node,
branch_var=branch_var,
branch_var_lb=var_lb,
branch_var_ub=floor(var_value),
)
child_one = _create_node(
mip,
index = ids[1],
parent = node,
branch_var = branch_var,
branch_var_lb = ceil(var_value),
branch_var_ub = var_ub,
index=ids[1],
parent=node,
branch_var=branch_var,
branch_var_lb=ceil(var_value),
branch_var_ub=var_ub,
)
offer(
pool,
parent_node = node,
child_nodes = [child_one, child_zero],
time_elapsed = time_elapsed,
print_interval = print_interval,
parent_node=node,
child_nodes=[child_one, child_zero],
time_elapsed=time_elapsed,
print_interval=print_interval,
)
end
end
end
return pool
replay.next_index = pool.next_index
return pool, replay
end
function _create_node(
mip;
index::Int = 0,
parent::Union{Nothing,Node} = nothing,
branch_var::Union{Nothing,Variable} = nothing,
branch_var_lb::Union{Nothing,Float64} = nothing,
branch_var_ub::Union{Nothing,Float64} = nothing,
index::Int=0,
parent::Union{Nothing,Node}=nothing,
branch_var::Union{Nothing,Variable}=nothing,
branch_var_lb::Union{Nothing,Float64}=nothing,
branch_var_ub::Union{Nothing,Float64}=nothing
)::Node
if parent === nothing
branch_vars = Variable[]
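The hunks above change BB.solve! to accept a replay keyword and to return a (pool, replay) tuple. A minimal usage sketch, assuming the BB functions that appear elsewhere in this diff (init, read!, solve!) and hypothetical instance paths; the first call records branching decisions in replay, and a later call reuses them, as in test_bb_replay near the end of this diff:

using MIPLearn.BB
using HiGHS
using JuMP: optimizer_with_attributes

mip = BB.init(optimizer_with_attributes(HiGHS.Optimizer))
BB.read!(mip, "instance.mps.gz")              # hypothetical filename
pool, replay = BB.solve!(mip, node_limit=1_000)
# Later, on a perturbed copy of the same instance, reuse the recorded decisions:
mip2 = BB.init(optimizer_with_attributes(HiGHS.Optimizer))
BB.read!(mip2, "instance-perturbed.mps.gz")   # hypothetical filename
pool2, _ = BB.solve!(mip2, node_limit=1_000, replay=deepcopy(replay))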


@@ -72,3 +72,14 @@ Base.@kwdef mutable struct NodePool
history::History = History()
var_history::Dict{Variable,VariableHistory} = Dict()
end
Base.@kwdef struct ReplayNodeDecision
branch_var
var_value
ids
end
Base.@kwdef mutable struct ReplayInfo
node_decisions::Dict{Int,ReplayNodeDecision} = Dict()
next_index::Int = 1
end


@@ -185,7 +185,7 @@ function collect_gmi(
)
end
function select_gmi_rows(data, basis, x; max_rows = 10, atol = 0.001)
function select_gmi_rows(data, basis, x; max_rows = 10, atol = 1e-4)
candidate_rows = [
r for r = 1:length(basis.var_basic) if (
(data.var_types[basis.var_basic[r]] != 'C') &&
@@ -199,82 +199,23 @@ function select_gmi_rows(data, basis, x; max_rows = 10, atol = 0.001)
return [candidate_rows[perm[i]] for i = 1:min(length(perm), max_rows)]
end
# function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet
# @timeit "Initialization" begin
# nrows, ncols = size(tableau.lhs)
# ub = Float64[Inf for _ = 1:nrows]
# lb = Float64[0.999 for _ = 1:nrows]
# tableau_I, tableau_J, tableau_V = findnz(tableau.lhs)
# lhs_I = Int[]
# lhs_J = Int[]
# lhs_V = Float64[]
# end
# @timeit "Compute coefficients" begin
# for k = 1:nnz(tableau.lhs)
# i::Int = tableau_I[k]
# j::Int = tableau_J[k]
# v::Float64 = 0.0
# frac_alpha_j = frac(tableau_V[k])
# alpha_j = tableau_V[k]
# beta = frac(tableau.rhs[i])
# if data.var_types[j] == 'C'
# if alpha_j >= 0
# v = alpha_j / beta
# else
# v = -alpha_j / (1 - beta)
# end
# else
# if frac_alpha_j < beta
# v = frac_alpha_j / beta
# else
# v = (1 - frac_alpha_j) / (1 - beta)
# end
# end
# if abs(v) > 1e-8
# push!(lhs_I, i)
# push!(lhs_J, tableau_J[k])
# push!(lhs_V, v)
# end
# end
# end
# @timeit "Convert to ConstraintSet" begin
# lhs = sparse(lhs_I, lhs_J, lhs_V, nrows, ncols)
# cs = ConstraintSet(; lhs, ub, lb)
# end
# return cs
# end
function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet
@timeit "Initialization" begin
nrows::Int, ncols::Int = size(tableau.lhs)
var_types::Vector{Char} = data.var_types
tableau_rhs::Vector{Float64} = tableau.rhs
tableau_I::Vector{Int}, tableau_J::Vector{Int}, tableau_V::Vector{Float64} = findnz(tableau.lhs)
end
@timeit "Pre-allocation" begin
cut_ub::Vector{Float64} = fill(Inf, nrows)
cut_lb::Vector{Float64} = fill(0.999, nrows)
nnz_tableau::Int = length(tableau_I)
cut_lhs_I::Vector{Int} = Vector{Int}(undef, nnz_tableau)
cut_lhs_J::Vector{Int} = Vector{Int}(undef, nnz_tableau)
cut_lhs_V::Vector{Float64} = Vector{Float64}(undef, nnz_tableau)
cut_hash::Vector{UInt64} = zeros(UInt64, nrows)
nnz_count::Int = 0
end
nrows, ncols = size(tableau.lhs)
ub = Float64[Inf for _ = 1:nrows]
lb = Float64[0.9999 for _ = 1:nrows]
tableau_I, tableau_J, tableau_V = findnz(tableau.lhs)
lhs_I = Int[]
lhs_J = Int[]
lhs_V = Float64[]
@timeit "Compute coefficients" begin
@inbounds for k = 1:nnz_tableau
for k = 1:nnz(tableau.lhs)
i::Int = tableau_I[k]
j::Int = tableau_J[k]
alpha_j::Float64 = tableau_V[k]
frac_alpha_j::Float64 = alpha_j - floor(alpha_j)
beta_i::Float64 = tableau_rhs[i]
beta::Float64 = beta_i - floor(beta_i)
v::Float64 = 0
# Compute coefficient
if var_types[j] == 'C'
v::Float64 = 0.0
frac_alpha_j = frac(tableau_V[k])
alpha_j = tableau_V[k]
beta = frac(tableau.rhs[i])
if data.var_types[j] == 'C'
if alpha_j >= 0
v = alpha_j / beta
else
@@ -287,34 +228,16 @@ function compute_gmi(data::ProblemData, tableau::Tableau)::ConstraintSet
v = (1 - frac_alpha_j) / (1 - beta)
end
end
# Store if significant
if abs(v) > 1e-8
nnz_count += 1
cut_lhs_I[nnz_count] = i
cut_lhs_J[nnz_count] = j
cut_lhs_V[nnz_count] = v
cut_hash[i] = hash(j, cut_hash[i])
cut_hash[i] = hash(v, cut_hash[i])
push!(lhs_I, i)
push!(lhs_J, tableau_J[k])
push!(lhs_V, v)
end
end
lhs = sparse(lhs_I, lhs_J, lhs_V, nrows, ncols)
end
@timeit "Resize arrays to actual size" begin
resize!(cut_lhs_I, nnz_count)
resize!(cut_lhs_J, nnz_count)
resize!(cut_lhs_V, nnz_count)
end
# TODO: Build cut in compressed row format instead of converting
@timeit "Convert to ConstraintSet" begin
cut_lhs::SparseMatrixCSC = sparse(cut_lhs_I, cut_lhs_J, cut_lhs_V, nrows, ncols)
cs::ConstraintSet = ConstraintSet(; lhs=cut_lhs, ub=cut_ub, lb=cut_lb, hash=cut_hash)
end
return cs
return ConstraintSet(; lhs, ub, lb)
end
export compute_gmi,
frac, select_gmi_rows, assert_cuts_off, assert_does_not_cut_off, collect_gmi
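For reference, the coefficient rule implemented in compute_gmi above can be stated on its own: writing beta = frac(rhs) for a tableau row, an integer column with fractional part f = frac(alpha) receives f/beta when f < beta and (1 - f)/(1 - beta) otherwise, while a continuous column receives alpha/beta when alpha >= 0 and -alpha/(1 - beta) otherwise. A small self-contained sketch of that rule, with hypothetical numbers not taken from the repository:

frac(x) = x - floor(x)
function gmi_coeff(alpha, beta; integer::Bool)
    if integer
        f = frac(alpha)
        return f < beta ? f / beta : (1 - f) / (1 - beta)
    else
        return alpha >= 0 ? alpha / beta : -alpha / (1 - beta)
    end
end
# Example with beta = 0.4: an integer column with alpha = 2.7 has frac 0.7 >= 0.4,
# so its coefficient is (1 - 0.7) / (1 - 0.4) = 0.5.
gmi_coeff(2.7, 0.4, integer=true)   # 0.5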

File diff suppressed because it is too large.


@@ -27,26 +27,25 @@ function assert_eq(a, b; atol = 1e-4)
end
function assert_cuts_off(cuts::ConstraintSet, x::Vector{Float64}, tol = 1e-6)
vals = cuts.lhs * x
for i = 1:length(cuts.lb)
if (vals[i] <= cuts.ub[i] - tol) && (vals[i] >= cuts.lb[i] + tol)
throw(ErrorException("inequality $i fails to cut off fractional solution: $(cuts.lb[i]) <= $(vals[i]) <= $(cuts.ub[i])"))
val = cuts.lhs[i, :]' * x
if (val <= cuts.ub[i] - tol) && (val >= cuts.lb[i] + tol)
throw(ErrorException("inequality fails to cut off fractional solution"))
end
end
end
function assert_does_not_cut_off(cuts::ConstraintSet, x::Vector{Float64}; tol = 1e-6)
vals = cuts.lhs * x
for i = 1:length(cuts.lb)
if (vals[i] >= cuts.ub[i]) || (vals[i] <= cuts.lb[i])
throw(ErrorException("inequality $i cuts off integer solution: $(cuts.lb[i]) <= $(vals[i]) <= $(cuts.ub[i])"))
val = cuts.lhs[i, :]' * x
ub = cuts.ub[i]
lb = cuts.lb[i]
if (val >= ub) || (val <= lb)
throw(
ErrorException(
"inequality $i cuts off integer solution ($lb <= $val <= $ub)",
),
)
end
end
end
function assert_int(x::Float64, tol=1e-5)
fx = frac(x)
if min(fx, 1 - fx) >= tol
throw(ErrorException("Number must be integer: $x"))
end
end
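As context for the two assertion helpers changed above, a minimal sketch of how they could be exercised on a toy cut; the data below is hypothetical and not from the test suite. A single inequality x1 + x2 >= 1 is stored as lb = [1.0], ub = [Inf]; the fractional point (0.2, 0.3) violates it, while the integer point (1, 1) satisfies it strictly.

using SparseArrays
cut = ConstraintSet(lhs=sparse([1.0 1.0]), ub=[Inf], lb=[1.0])
assert_cuts_off(cut, [0.2, 0.3])          # passes: 0.5 < 1, so the cut separates the point
assert_does_not_cut_off(cut, [1.0, 1.0])  # passes: 2.0 lies strictly inside the bounds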


@@ -18,10 +18,10 @@ Base.@kwdef mutable struct ProblemData
end
Base.@kwdef mutable struct Tableau
obj::Vector{Float64}
lhs::SparseMatrixCSC
rhs::Vector{Float64}
z::Float64
obj::Any
lhs::Any
rhs::Any
z::Any
end
Base.@kwdef mutable struct Basis
@@ -35,7 +35,6 @@ Base.@kwdef mutable struct ConstraintSet
lhs::SparseMatrixCSC
ub::Vector{Float64}
lb::Vector{Float64}
hash::Union{Nothing,Vector{UInt64}} = nothing
end
export ProblemData, Tableau, Basis, ConstraintSet


@@ -4,161 +4,48 @@
using KLU
using TimerOutputs
using Gurobi
function get_basis(model::JuMP.Model)::Basis
if isa(unsafe_backend(model), Gurobi.Optimizer)
return get_basis_gurobi(model)
end
var_basic = Int[]
var_nonbasic = Int[]
constr_basic = Int[]
constr_nonbasic = Int[]
@timeit "Initialization" begin
var_basic = Int[]
var_nonbasic = Int[]
constr_basic = Int[]
constr_nonbasic = Int[]
nvars = num_variables(model)
sizehint!(var_basic, nvars)
sizehint!(var_nonbasic, nvars)
end
@timeit "Query variables" begin
for (i, var) in enumerate(all_variables(model))
bstatus = MOI.get(model, MOI.VariableBasisStatus(), var)
if bstatus == MOI.BASIC
push!(var_basic, i)
elseif bstatus == MOI.NONBASIC_AT_LOWER
push!(var_nonbasic, i)
else
error("Unknown basis status: $bstatus")
end
# Variables
for (i, var) in enumerate(all_variables(model))
bstatus = MOI.get(model, MOI.VariableBasisStatus(), var)
if bstatus == MOI.BASIC
push!(var_basic, i)
elseif bstatus == MOI.NONBASIC_AT_LOWER
push!(var_nonbasic, i)
else
error("Unknown basis status: $bstatus")
end
end
@timeit "Query constraints" begin
constr_index = 1
for (ftype, stype) in list_of_constraint_types(model)
for constr in all_constraints(model, ftype, stype)
if ftype == VariableRef
# nop
elseif ftype == AffExpr
bstatus = MOI.get(model, MOI.ConstraintBasisStatus(), constr)
if bstatus == MOI.BASIC
push!(constr_basic, constr_index)
elseif bstatus == MOI.NONBASIC
push!(constr_nonbasic, constr_index)
else
error("Unknown basis status: $bstatus")
end
constr_index += 1
# Constraints
constr_index = 1
for (ftype, stype) in list_of_constraint_types(model)
for constr in all_constraints(model, ftype, stype)
if ftype == VariableRef
# nop
elseif ftype == AffExpr
bstatus = MOI.get(model, MOI.ConstraintBasisStatus(), constr)
if bstatus == MOI.BASIC
push!(constr_basic, constr_index)
elseif bstatus == MOI.NONBASIC
push!(constr_nonbasic, constr_index)
else
error("Unsupported constraint type: ($ftype, $stype)")
error("Unknown basis status: $bstatus")
end
end
end
end
@timeit "Build basis struct" begin
basis = Basis(; var_basic, var_nonbasic, constr_basic, constr_nonbasic)
end
return basis
end
function set_basis(model::JuMP.Model, basis::Basis)
if isa(unsafe_backend(model), Gurobi.Optimizer)
# NOP
return
end
@timeit "Initialization" begin
nvars = num_variables(model)
gurobi_model = unsafe_backend(model).inner
end
@timeit "Set variable basis" begin
var_basis_statuses = Vector{Cint}(undef, nvars)
fill!(var_basis_statuses, -1) # Default to GRB_NONBASIC_LOWER
for var_idx in basis.var_basic
var_basis_statuses[var_idx] = 0 # GRB_BASIC
end
ret = GRBsetintattrarray(gurobi_model, "VBasis", 0, nvars, var_basis_statuses)
if ret != 0
error("Failed to set variable basis statuses in Gurobi: error code $ret")
end
end
@timeit "Set constraint basis" begin
nconstr = num_constraints(model, AffExpr, MOI.EqualTo{Float64})
constr_basis_statuses = Vector{Cint}(undef, nconstr)
fill!(constr_basis_statuses, -1) # Default to GRB_NONBASIC
for constr_idx in basis.constr_basic
constr_basis_statuses[constr_idx] = 0 # GRB_BASIC
end
ret = GRBsetintattrarray(gurobi_model, "CBasis", 0, nconstr, constr_basis_statuses)
if ret != 0
error("Failed to set constraint basis statuses in Gurobi: error code $ret")
end
end
return nothing
end
function get_basis_gurobi(model::JuMP.Model)::Basis
@timeit "Initialization" begin
var_basic = Int[]
var_nonbasic = Int[]
constr_basic = Int[]
constr_nonbasic = Int[]
nvars = num_variables(model)
sizehint!(var_basic, nvars)
sizehint!(var_nonbasic, nvars)
gurobi_model = unsafe_backend(model).inner
end
@timeit "Query variables" begin
var_basis_statuses = Vector{Cint}(undef, nvars)
ret = GRBgetintattrarray(gurobi_model, "VBasis", 0, nvars, var_basis_statuses)
if ret != 0
error("Failed to get variable basis statuses from Gurobi: error code $ret")
end
for i in 1:nvars
if var_basis_statuses[i] == 0 # GRB_BASIC
push!(var_basic, i)
elseif var_basis_statuses[i] == -1 # GRB_NONBASIC_LOWER
push!(var_nonbasic, i)
constr_index += 1
else
error("Unknown variable basis status: $(var_basis_statuses[i])")
error("Unsupported constraint type: ($ftype, $stype)")
end
end
end
@timeit "Query constraints" begin
nconstr = num_constraints(model, AffExpr, MOI.EqualTo{Float64})
constr_basis_statuses = Vector{Cint}(undef, nconstr)
ret = GRBgetintattrarray(gurobi_model, "CBasis", 0, nconstr, constr_basis_statuses)
if ret != 0
error("Failed to get constraint basis statuses from Gurobi: error code $ret")
end
for i in 1:nconstr
if constr_basis_statuses[i] == 0 # GRB_BASIC
push!(constr_basic, i)
elseif constr_basis_statuses[i] == -1 # GRB_NONBASIC
push!(constr_nonbasic, i)
else
error("Unknown constraint basis status: $(constr_basis_statuses[i])")
end
end
end
@timeit "Build basis struct" begin
basis = Basis(; var_basic, var_nonbasic, constr_basic, constr_nonbasic)
end
return basis
return Basis(; var_basic, var_nonbasic, constr_basic, constr_nonbasic)
end
function get_x(model::JuMP.Model)
@@ -171,12 +58,7 @@ function compute_tableau(
x::Union{Nothing,Vector{Float64}} = nothing,
rows::Union{Vector{Int},Nothing} = nothing,
tol = 1e-8,
estimated_density = 0.10,
)::Tableau
if isnan(estimated_density) || estimated_density <= 0
estimated_density = 0.10
end
@timeit "Split data" begin
nrows, ncols = size(data.constr_lhs)
lhs_slacks = sparse(I, nrows, nrows)
@@ -191,71 +73,35 @@ function compute_tableau(
factor = klu(sparse(lhs_b'))
end
@timeit "Initialize arrays" begin
num_rows = length(rows)
tableau_rhs::Array{Float64} = zeros(num_rows)
tableau_rowptr::Array{Int} = zeros(Int, num_rows + 1)
tableau_colval::Array{Int} = Int[]
tableau_nzval::Array{Float64} = Float64[]
estimated_nnz::Int = round(num_rows * ncols * estimated_density)
sizehint!(tableau_colval, estimated_nnz)
sizehint!(tableau_nzval, estimated_nnz)
e::Array{Float64} = zeros(nrows)
sol::Array{Float64} = zeros(nrows)
tableau_row::Array{Float64} = zeros(ncols)
end
A = data.constr_lhs'
b = data.constr_ub
tableau_rowptr[1] = 1
@timeit "Process rows" begin
for k in eachindex(rows)
@timeit "Solve" begin
fill!(e, 0.0)
e[rows[k]] = 1.0
ldiv!(sol, factor, e)
end
@timeit "Compute row" begin
mul!(tableau_row, A, sol)
tableau_rhs[k] = dot(sol, b)
end
needed_space = length(tableau_colval) + ncols
if needed_space > estimated_nnz
@timeit "Grow arrays" begin
estimated_nnz *= 2
sizehint!(tableau_colval, estimated_nnz)
sizehint!(tableau_nzval, estimated_nnz)
end
end
@timeit "Collect nonzeros for row" begin
for j in 1:ncols
val = tableau_row[j]
if abs(val) > tol
push!(tableau_colval, j)
push!(tableau_nzval, val)
end
end
end
tableau_rowptr[k + 1] = length(tableau_colval) + 1
@timeit "Compute tableau" begin
@timeit "Initialize" begin
tableau_rhs = zeros(length(rows))
tableau_lhs = zeros(length(rows), ncols)
end
for k in eachindex(1:length(rows))
@timeit "Prepare inputs" begin
i = rows[k]
e = zeros(nrows)
e[i] = 1.0
end
@timeit "Solve" begin
sol = factor \ e
end
@timeit "Multiply" begin
tableau_lhs[k, :] = sol' * data.constr_lhs
tableau_rhs[k] = sol' * data.constr_ub
end
end
@timeit "Sparsify" begin
tableau_lhs[abs.(tableau_lhs) .<= tol] .= 0
tableau_lhs = sparse(tableau_lhs)
end
end
@timeit "Shrink arrays" begin
sizehint!(tableau_colval, length(tableau_colval))
sizehint!(tableau_nzval, length(tableau_nzval))
end
@timeit "Build sparse matrix" begin
tableau_lhs_transposed = SparseMatrixCSC(ncols, num_rows, tableau_rowptr, tableau_colval, tableau_nzval)
tableau_lhs = transpose(tableau_lhs_transposed)
end
@timeit "Compute tableau objective row" begin
sol = factor \ obj_b
tableau_obj = -data.obj' + sol' * data.constr_lhs
tableau_obj[abs.(tableau_obj).<tol] .= 0
tableau_obj = Array(tableau_obj')
end
# Compute z if solution is provided
@@ -267,4 +113,4 @@ function compute_tableau(
return Tableau(obj = tableau_obj, lhs = tableau_lhs, rhs = tableau_rhs, z = z)
end
export get_basis, get_basis_gurobi, set_basis, get_x, compute_tableau
export get_basis, get_x, compute_tableau


@@ -96,70 +96,46 @@ Base.@kwdef mutable struct AddSlackVariables <: Transform
end
function forward!(t::AddSlackVariables, data::ProblemData)
@timeit "Identify constraint type" begin
nrows, ncols = size(data.constr_lhs)
isequality = abs.(data.constr_ub .- data.constr_lb) .< 1e-6
eq = [i for i = 1:nrows if isequality[i]]
ge = [i for i = 1:nrows if isfinite(data.constr_lb[i]) && !isequality[i]]
le = [i for i = 1:nrows if isfinite(data.constr_ub[i]) && !isequality[i]]
EQ, GE, LE = length(eq), length(ge), length(le)
end
@timeit "Identify slack type" begin
constr_lhs_t = sparse(data.constr_lhs')
function is_integral(row_idx, rhs)
rhs_is_integer = abs(rhs - round(rhs)) <= 1e-6
cols, coeffs = findnz(constr_lhs_t[:, row_idx])[1:2]
vars_are_integer = all(j -> data.var_types[j] ∈ ['I', 'B'], cols)
coeffs_are_integer = all(v -> abs(v - round(v)) <= 1e-6, coeffs)
return rhs_is_integer && vars_are_integer && coeffs_are_integer
end
slack_types = [
[is_integral(ge[i], data.constr_lb[ge[i]]) ? 'I' : 'C' for i = 1:GE];
[is_integral(le[i], data.constr_ub[le[i]]) ? 'I' : 'C' for i = 1:LE]
]
end
@timeit "Build M1" begin
t.M1 = [
I spzeros(ncols, GE + LE)
data.constr_lhs[ge, :] spzeros(GE, GE + LE)
-data.constr_lhs[le, :] spzeros(LE, GE + LE)
]
end
@timeit "Build M2" begin
t.M2 = [
zeros(ncols)
data.constr_lb[ge]
-data.constr_ub[le]
]
end
@timeit "Build t.lhs, t.rhs" begin
t.ncols_orig = ncols
t.GE, t.LE = GE, LE
t.lhs_ge = data.constr_lhs[ge, :]
t.lhs_le = data.constr_lhs[le, :]
t.rhs_ge = data.constr_lb[ge]
t.rhs_le = data.constr_ub[le]
end
@timeit "Build data.constr_lhs" begin
data.constr_lhs = [
data.constr_lhs[eq, :] spzeros(EQ, GE) spzeros(EQ, LE)
data.constr_lhs[ge, :] -I spzeros(GE, LE)
data.constr_lhs[le, :] spzeros(LE, GE) I
]
end
@timeit "Build other data fields" begin
data.obj = [data.obj; zeros(GE + LE)]
data.var_lb = [data.var_lb; zeros(GE + LE)]
data.var_ub = [data.var_ub; [Inf for _ = 1:(GE+LE)]]
data.var_names = [data.var_names; ["__s$i" for i = 1:(GE+LE)]]
data.var_types = [data.var_types; slack_types]
data.constr_lb = [
data.constr_lb[eq]
data.constr_lb[ge]
data.constr_ub[le]
]
data.constr_ub = copy(data.constr_lb)
end
nrows, ncols = size(data.constr_lhs)
isequality = abs.(data.constr_ub .- data.constr_lb) .< 1e-6
eq = [i for i = 1:nrows if isequality[i]]
ge = [i for i = 1:nrows if isfinite(data.constr_lb[i]) && !isequality[i]]
le = [i for i = 1:nrows if isfinite(data.constr_ub[i]) && !isequality[i]]
EQ, GE, LE = length(eq), length(ge), length(le)
t.M1 = [
I spzeros(ncols, GE + LE)
data.constr_lhs[ge, :] spzeros(GE, GE + LE)
-data.constr_lhs[le, :] spzeros(LE, GE + LE)
]
t.M2 = [
zeros(ncols)
data.constr_lb[ge]
-data.constr_ub[le]
]
t.ncols_orig = ncols
t.GE, t.LE = GE, LE
t.lhs_ge = data.constr_lhs[ge, :]
t.lhs_le = data.constr_lhs[le, :]
t.rhs_ge = data.constr_lb[ge]
t.rhs_le = data.constr_ub[le]
data.constr_lhs = [
data.constr_lhs[eq, :] spzeros(EQ, GE) spzeros(EQ, LE)
data.constr_lhs[ge, :] -I spzeros(GE, LE)
data.constr_lhs[le, :] spzeros(LE, GE) I
]
data.obj = [data.obj; zeros(GE + LE)]
data.var_lb = [data.var_lb; zeros(GE + LE)]
data.var_ub = [data.var_ub; [Inf for _ = 1:(GE+LE)]]
data.var_names = [data.var_names; ["__s$i" for i = 1:(GE+LE)]]
data.var_types = [data.var_types; ['C' for _ = 1:(GE+LE)]]
data.constr_lb = [
data.constr_lb[eq]
data.constr_lb[ge]
data.constr_ub[le]
]
data.constr_ub = copy(data.constr_lb)
end
function backwards!(t::AddSlackVariables, c::ConstraintSet)
@@ -179,55 +155,71 @@ end
# -----------------------------------------------------------------------------
Base.@kwdef mutable struct SplitFreeVars <: Transform
ncols::Int = 0
is_var_free::Vector{Bool} = []
F::Int = 0
B::Int = 0
free::Vector{Int} = []
others::Vector{Int} = []
end
function forward!(t::SplitFreeVars, data::ProblemData)
lhs = data.constr_lhs
_, ncols = size(lhs)
is_var_free = [!isfinite(data.var_lb[i]) && !isfinite(data.var_ub[i]) for i = 1:ncols]
free_idx = findall(is_var_free)
free = [i for i = 1:ncols if !isfinite(data.var_lb[i]) && !isfinite(data.var_ub[i])]
others = [i for i = 1:ncols if isfinite(data.var_lb[i]) || isfinite(data.var_ub[i])]
t.F = length(free)
t.B = length(others)
t.free, t.others = free, others
data.obj = [
data.obj
[-data.obj[i] for i in free_idx]
data.obj[others]
data.obj[free]
-data.obj[free]
]
data.constr_lhs = [lhs[:, others] lhs[:, free] -lhs[:, free]]
data.var_lb = [
[is_var_free[i] ? 0.0 : data.var_lb[i] for i in 1:ncols]
[0 for _ in free_idx]
data.var_lb[others]
[0.0 for _ in free]
[0.0 for _ in free]
]
data.var_ub = [
[is_var_free[i] ? Inf : data.var_ub[i] for i in 1:ncols]
[Inf for _ in free_idx]
data.var_ub[others]
[Inf for _ in free]
[Inf for _ in free]
]
data.var_types = [
data.var_types
[data.var_types[i] for i in free_idx]
data.var_types[others]
data.var_types[free]
data.var_types[free]
]
data.var_names = [
data.var_names
["$(data.var_names[i])_neg" for i in free_idx]
data.var_names[others]
["$(v)_p" for v in data.var_names[free]]
["$(v)_m" for v in data.var_names[free]]
]
data.constr_lhs = [lhs -lhs[:, free_idx]]
t.is_var_free, t.ncols = is_var_free, ncols
end
function backwards!(t::SplitFreeVars, c::ConstraintSet)
ncols, is_var_free = t.ncols, t.is_var_free
free_idx = findall(is_var_free)
# Convert GE constraints into LE
nrows, _ = size(c.lhs)
ge = [i for i = 1:nrows if isfinite(c.lb[i])]
c.ub[ge], c.lb[ge] = -c.lb[ge], -c.ub[ge]
c.lhs[ge, :] *= -1
for (offset, var_idx) in enumerate(free_idx)
@assert c.lhs[:, var_idx] == -c.lhs[:, ncols+offset]
# Assert only LE constraints are left (EQ constraints are not supported)
@assert all(c.lb .== -Inf)
# Take minimum (weakest) coefficient
B, F = t.B, t.F
for i = 1:F
c.lhs[:, B+i] = min.(c.lhs[:, B+i], -c.lhs[:, B+F+i])
end
c.lhs = c.lhs[:, 1:ncols]
c.lhs = c.lhs[:, 1:(B+F)]
end
function forward(t::SplitFreeVars, p::Vector{Float64})::Vector{Float64}
ncols, is_var_free = t.ncols, t.is_var_free
free_idx = findall(is_var_free)
return [
[is_var_free[i] ? max(0, p[i]) : p[i] for i in 1:ncols]
[max(0, -p[i]) for i in free_idx]
p[t.others]
max.(p[t.free], 0)
max.(-p[t.free], 0)
]
end
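To recap what the SplitFreeVars transform rewritten above does: each free variable x (no finite bounds) is replaced by two nonnegative variables with x = x_p - x_m, so its objective and constraint columns are duplicated with flipped signs; the forward map of a point sends x to (max(x, 0), max(-x, 0)), and backwards! recombines the two cut columns by taking the weaker coefficient. A tiny illustration of the forward map for a single free value, using a hypothetical helper that is not code from the repository:

# Split one free value x into a nonnegative pair (x_p, x_m) with x == x_p - x_m.
split_free(x) = (max(x, 0.0), max(-x, 0.0))
split_free(-3.0)   # (0.0, 3.0)
split_free(2.5)    # (2.5, 0.0)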


@@ -13,7 +13,6 @@ include("collectors.jl")
include("components.jl")
include("extractors.jl")
include("io.jl")
include("problems/maxcut.jl")
include("problems/setcover.jl")
include("problems/stab.jl")
include("problems/tsp.jl")
@@ -25,7 +24,6 @@ function __init__()
__init_components__()
__init_extractors__()
__init_io__()
__init_problems_maxcut__()
__init_problems_setcover__()
__init_problems_stab__()
__init_problems_tsp__()
@@ -39,48 +37,48 @@ include("Cuts/Cuts.jl")
# Precompilation
# =============================================================================
# function __precompile_cuts__()
# function build_model(mps_filename)
# model = read_from_file(mps_filename)
# set_optimizer(model, SCIP.Optimizer)
# return JumpModel(model)
# end
# BASEDIR = dirname(@__FILE__)
# mps_filename = "$BASEDIR/../test/fixtures/bell5.mps.gz"
# h5_filename = "$BASEDIR/../test/fixtures/bell5.h5"
# collect_gmi_dual(
# mps_filename;
# optimizer=HiGHS.Optimizer,
# max_rounds = 10,
# max_cuts_per_round = 500,
# )
# knn = KnnDualGmiComponent(
# extractor = H5FieldsExtractor(instance_fields = ["static_var_obj_coeffs"]),
# k = 2,
# )
# knn.fit([h5_filename, h5_filename])
# solver = LearningSolver(
# components = [
# ExpertPrimalComponent(action = SetWarmStart()),
# knn,
# ],
# skip_lp = true,
# )
# solver.optimize(mps_filename, build_model)
# end
function __precompile_cuts__()
function build_model(mps_filename)
model = read_from_file(mps_filename)
set_optimizer(model, SCIP.Optimizer)
return JumpModel(model)
end
BASEDIR = dirname(@__FILE__)
mps_filename = "$BASEDIR/../test/fixtures/bell5.mps.gz"
h5_filename = "$BASEDIR/../test/fixtures/bell5.h5"
collect_gmi_dual(
mps_filename;
optimizer=HiGHS.Optimizer,
max_rounds = 10,
max_cuts_per_round = 500,
)
knn = KnnDualGmiComponent(
extractor = H5FieldsExtractor(instance_fields = ["static_var_obj_coeffs"]),
k = 2,
)
knn.fit([h5_filename, h5_filename])
solver = LearningSolver(
components = [
ExpertPrimalComponent(action = SetWarmStart()),
knn,
],
skip_lp = true,
)
solver.optimize(mps_filename, build_model)
end
# @setup_workload begin
# using SCIP
# using HiGHS
# using MIPLearn.Cuts
# using PrecompileTools: @setup_workload, @compile_workload
@setup_workload begin
using SCIP
using HiGHS
using MIPLearn.Cuts
using PrecompileTools: @setup_workload, @compile_workload
# __init__()
# Cuts.__init__()
__init__()
Cuts.__init__()
# @compile_workload begin
# __precompile_cuts__()
# end
# end
@compile_workload begin
__precompile_cuts__()
end
end
end # module


@@ -1,31 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2025, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using JuMP
global MaxCutData = PyNULL()
global MaxCutGenerator = PyNULL()
function __init_problems_maxcut__()
copy!(MaxCutData, pyimport("miplearn.problems.maxcut").MaxCutData)
copy!(MaxCutGenerator, pyimport("miplearn.problems.maxcut").MaxCutGenerator)
end
function build_maxcut_model_jump(data::Any; optimizer)
if data isa String
data = read_pkl_gz(data)
end
nodes = collect(data.graph.nodes())
edges = collect(data.graph.edges())
model = Model(optimizer)
@variable(model, x[nodes], Bin)
@objective(
model,
Min,
sum(-data.weights[i] * x[e[1]] * (1 - x[e[2]]) for (i, e) in enumerate(edges))
)
return JumpModel(model)
end
export MaxCutData, MaxCutGenerator, build_maxcut_model_jump


@@ -69,13 +69,14 @@ function submit(model::JuMP.Model, constr)
end
function _extract_after_load(model::JuMP.Model, h5)
@info "_extract_after_load"
if JuMP.objective_sense(model) == MOI.MIN_SENSE
h5.put_scalar("static_sense", "min")
else
h5.put_scalar("static_sense", "max")
end
_extract_after_load_vars(model, h5)
_extract_after_load_constrs(model, h5)
@time _extract_after_load_vars(model, h5)
@time _extract_after_load_constrs(model, h5)
end
function _extract_after_load_vars(model::JuMP.Model, h5)
@@ -89,27 +90,14 @@ function _extract_after_load_vars(model::JuMP.Model, h5)
for v in vars
]
types = [JuMP.is_binary(v) ? "B" : JuMP.is_integer(v) ? "I" : "C" for v in vars]
# Linear obj terms
obj = objective_function(model, QuadExpr)
obj_coeffs_linear = [v ∈ keys(obj.aff.terms) ? obj.aff.terms[v] : 0.0 for v in vars]
# Quadratic obj terms
if length(obj.terms) > 0
nvars = length(vars)
obj_coeffs_quad = zeros(nvars, nvars)
for (pair, coeff) in obj.terms
obj_coeffs_quad[pair.a.index.value, pair.b.index.value] = coeff
end
h5.put_array("static_var_obj_coeffs_quad", obj_coeffs_quad)
end
obj = objective_function(model, AffExpr)
obj_coeffs = [v ∈ keys(obj.terms) ? obj.terms[v] : 0.0 for v in vars]
h5.put_array("static_var_names", to_str_array(JuMP.name.(vars)))
h5.put_array("static_var_types", to_str_array(types))
h5.put_array("static_var_lower_bounds", lb)
h5.put_array("static_var_upper_bounds", ub)
h5.put_array("static_var_obj_coeffs", obj_coeffs_linear)
h5.put_scalar("static_obj_offset", obj.aff.constant)
h5.put_array("static_var_obj_coeffs", obj_coeffs)
h5.put_scalar("static_obj_offset", obj.constant)
end
function _extract_after_load_constrs(model::JuMP.Model, h5)
@@ -156,7 +144,7 @@ function _extract_after_load_constrs(model::JuMP.Model, h5)
end
end
if isempty(names)
return
error("no model constraints found; note that MIPLearn ignores unnamed constraints")
end
lhs = sparse(lhs_rows, lhs_cols, lhs_values, length(rhs), JuMP.num_variables(model))
h5.put_sparse("static_constr_lhs", lhs)
@@ -166,10 +154,11 @@ function _extract_after_load_constrs(model::JuMP.Model, h5)
end
function _extract_after_lp(model::JuMP.Model, h5)
@info "_extract_after_lp"
h5.put_scalar("lp_wallclock_time", solve_time(model))
h5.put_scalar("lp_obj_value", objective_value(model))
_extract_after_lp_vars(model, h5)
_extract_after_lp_constrs(model, h5)
@time _extract_after_lp_vars(model, h5)
@time _extract_after_lp_constrs(model, h5)
end
function _extract_after_lp_vars(model::JuMP.Model, h5)
@@ -195,46 +184,46 @@ function _extract_after_lp_vars(model::JuMP.Model, h5)
end
h5.put_array("lp_var_basis_status", to_str_array(basis_status))
# Sensitivity analysis
obj_coeffs = h5.get_array("static_var_obj_coeffs")
sensitivity_report = lp_sensitivity_report(model)
sa_obj_down, sa_obj_up = Float64[], Float64[]
sa_lb_down, sa_lb_up = Float64[], Float64[]
sa_ub_down, sa_ub_up = Float64[], Float64[]
for (i, v) in enumerate(vars)
# Objective function
(delta_down, delta_up) = sensitivity_report[v]
push!(sa_obj_down, delta_down + obj_coeffs[i])
push!(sa_obj_up, delta_up + obj_coeffs[i])
# # Sensitivity analysis
# obj_coeffs = h5.get_array("static_var_obj_coeffs")
# sensitivity_report = lp_sensitivity_report(model)
# sa_obj_down, sa_obj_up = Float64[], Float64[]
# sa_lb_down, sa_lb_up = Float64[], Float64[]
# sa_ub_down, sa_ub_up = Float64[], Float64[]
# for (i, v) in enumerate(vars)
# # Objective function
# (delta_down, delta_up) = sensitivity_report[v]
# push!(sa_obj_down, delta_down + obj_coeffs[i])
# push!(sa_obj_up, delta_up + obj_coeffs[i])
# Lower bound
if has_lower_bound(v)
constr = LowerBoundRef(v)
(delta_down, delta_up) = sensitivity_report[constr]
push!(sa_lb_down, lower_bound(v) + delta_down)
push!(sa_lb_up, lower_bound(v) + delta_up)
else
push!(sa_lb_down, -Inf)
push!(sa_lb_up, -Inf)
end
# # Lower bound
# if has_lower_bound(v)
# constr = LowerBoundRef(v)
# (delta_down, delta_up) = sensitivity_report[constr]
# push!(sa_lb_down, lower_bound(v) + delta_down)
# push!(sa_lb_up, lower_bound(v) + delta_up)
# else
# push!(sa_lb_down, -Inf)
# push!(sa_lb_up, -Inf)
# end
# Upper bound
if has_upper_bound(v)
constr = JuMP.UpperBoundRef(v)
(delta_down, delta_up) = sensitivity_report[constr]
push!(sa_ub_down, upper_bound(v) + delta_down)
push!(sa_ub_up, upper_bound(v) + delta_up)
else
push!(sa_ub_down, Inf)
push!(sa_ub_up, Inf)
end
end
h5.put_array("lp_var_sa_obj_up", sa_obj_up)
h5.put_array("lp_var_sa_obj_down", sa_obj_down)
h5.put_array("lp_var_sa_ub_up", sa_ub_up)
h5.put_array("lp_var_sa_ub_down", sa_ub_down)
h5.put_array("lp_var_sa_lb_up", sa_lb_up)
h5.put_array("lp_var_sa_lb_down", sa_lb_down)
# # Upper bound
# if has_upper_bound(v)
# constr = JuMP.UpperBoundRef(v)
# (delta_down, delta_up) = sensitivity_report[constr]
# push!(sa_ub_down, upper_bound(v) + delta_down)
# push!(sa_ub_up, upper_bound(v) + delta_up)
# else
# push!(sa_ub_down, Inf)
# push!(sa_ub_up, Inf)
# end
# end
# h5.put_array("lp_var_sa_obj_up", sa_obj_up)
# h5.put_array("lp_var_sa_obj_down", sa_obj_down)
# h5.put_array("lp_var_sa_ub_up", sa_ub_up)
# h5.put_array("lp_var_sa_ub_down", sa_ub_down)
# h5.put_array("lp_var_sa_lb_up", sa_lb_up)
# h5.put_array("lp_var_sa_lb_down", sa_lb_down)
end
@@ -250,7 +239,7 @@ function _extract_after_lp_constrs(model::JuMP.Model, h5)
duals = Float64[]
basis_status = []
constr_idx = 1
sensitivity_report = lp_sensitivity_report(model)
# sensitivity_report = lp_sensitivity_report(model)
for (ftype, stype) in JuMP.list_of_constraint_types(model)
for constr in JuMP.all_constraints(model, ftype, stype)
length(JuMP.name(constr)) > 0 || continue
@@ -268,21 +257,22 @@ function _extract_after_lp_constrs(model::JuMP.Model, h5)
error("Unknown basis status: $b")
end
# Sensitivity analysis
(delta_down, delta_up) = sensitivity_report[constr]
push!(sa_rhs_down, rhs[constr_idx] + delta_down)
push!(sa_rhs_up, rhs[constr_idx] + delta_up)
# # Sensitivity analysis
# (delta_down, delta_up) = sensitivity_report[constr]
# push!(sa_rhs_down, rhs[constr_idx] + delta_down)
# push!(sa_rhs_up, rhs[constr_idx] + delta_up)
constr_idx += 1
end
end
h5.put_array("lp_constr_dual_values", duals)
h5.put_array("lp_constr_basis_status", to_str_array(basis_status))
h5.put_array("lp_constr_sa_rhs_up", sa_rhs_up)
h5.put_array("lp_constr_sa_rhs_down", sa_rhs_down)
# h5.put_array("lp_constr_sa_rhs_up", sa_rhs_up)
# h5.put_array("lp_constr_sa_rhs_down", sa_rhs_down)
end
function _extract_after_mip(model::JuMP.Model, h5)
@info "_extract_after_mip"
h5.put_scalar("mip_obj_value", objective_value(model))
h5.put_scalar("mip_obj_bound", objective_bound(model))
h5.put_scalar("mip_wallclock_time", solve_time(model))
@@ -295,11 +285,9 @@ function _extract_after_mip(model::JuMP.Model, h5)
# Slacks
lhs = h5.get_sparse("static_constr_lhs")
if lhs !== nothing
rhs = h5.get_array("static_constr_rhs")
slacks = abs.(lhs * x - rhs)
h5.put_array("mip_constr_slacks", slacks)
end
rhs = h5.get_array("static_constr_rhs")
slacks = abs.(lhs * x - rhs)
h5.put_array("mip_constr_slacks", slacks)
# Cuts and lazy constraints
ext = model.ext[:miplearn]
@@ -310,7 +298,9 @@ end
function _fix_variables(model::JuMP.Model, var_names, var_values, stats)
vars = [variable_by_name(model, v) for v in var_names]
for (i, var) in enumerate(vars)
fix(var, var_values[i], force = true)
if isfinite(var_values[i])
fix(var, var_values[i], force=true)
end
end
end
@@ -427,7 +417,7 @@ function __init_solvers_jump__()
constrs_lhs,
constrs_sense,
constrs_rhs,
stats = nothing,
stats=nothing,
) = _add_constrs(
self.inner,
from_str_array(var_names),
@@ -443,14 +433,14 @@ function __init_solvers_jump__()
extract_after_mip(self, h5) = _extract_after_mip(self.inner, h5)
fix_variables(self, var_names, var_values, stats = nothing) =
fix_variables(self, var_names, var_values, stats=nothing) =
_fix_variables(self.inner, from_str_array(var_names), var_values, stats)
optimize(self) = _optimize(self.inner)
relax(self) = Class(_relax(self.inner))
set_warm_starts(self, var_names, var_values, stats = nothing) =
set_warm_starts(self, var_names, var_values, stats=nothing) =
_set_warm_starts(self.inner, from_str_array(var_names), var_values, stats)
write(self, filename) = _write(self.inner, filename)


@@ -4,7 +4,9 @@ authors = ["Alinson S. Xavier <git@axavier.org>"]
version = "0.1.0"
[deps]
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"

New binary test fixtures under test/fixtures/stab/ (vendored; binary contents not shown), including:
test/fixtures/stab/stab-n190-00000.h5
test/fixtures/stab/stab-n190-00002.h5
test/fixtures/stab/stab-n190-00003.h5
test/fixtures/stab/stab-n190-00004.h5
test/fixtures/stab/stab-n190-00005.h5
test/fixtures/stab/stab-n190-00006.h5

test/src/BB/tables.ipynb (new file, 284 lines)

File diff suppressed because one or more lines are too long


@@ -10,9 +10,12 @@ using Test
using MIPLearn.BB
using MIPLearn
using CSV
using DataFrames
basepath = @__DIR__
function bb_run(optimizer_name, optimizer; large = true)
function bb_run(optimizer_name, optimizer; large=true)
@testset "Solve ($optimizer_name)" begin
@testset "interface" begin
filename = "$FIXTURES/danoint.mps.gz"
@@ -25,7 +28,7 @@ function bb_run(optimizer_name, optimizer; large = true)
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.637280
@test round(obj, digits=6) == 62.637280
@test BB.name(mip, mip.int_vars[1]) == "xab"
@test BB.name(mip, mip.int_vars[2]) == "xac"
@@ -35,26 +38,26 @@ function bb_run(optimizer_name, optimizer; large = true)
@test mip.int_vars_ub[1] == 1.0
vals = BB.values(mip, mip.int_vars)
@test round(vals[1], digits = 6) == 0.046933
@test round(vals[2], digits = 6) == 0.000841
@test round(vals[3], digits = 6) == 0.248696
@test round(vals[1], digits=6) == 0.046933
@test round(vals[2], digits=6) == 0.000841
@test round(vals[3], digits=6) == 0.248696
# Probe (up and down are feasible)
probe_up, probe_down = BB.probe(mip, mip.int_vars[1], 0.5, 0.0, 1.0, 1_000_000)
@test round(probe_down, digits = 6) == 62.690000
@test round(probe_up, digits = 6) == 62.714100
@test round(probe_down, digits=6) == 62.690000
@test round(probe_up, digits=6) == 62.714100
# Fix one variable to zero
BB.set_bounds!(mip, mip.int_vars[1:1], [0.0], [0.0])
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.690000
@test round(obj, digits=6) == 62.690000
# Fix one variable to one and another variable to zero
BB.set_bounds!(mip, mip.int_vars[1:2], [1.0, 0.0], [1.0, 0.0])
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.714777
@test round(obj, digits=6) == 62.714777
# Fix all binary variables to one, making problem infeasible
N = length(mip.int_vars)
@@ -68,7 +71,7 @@ function bb_run(optimizer_name, optimizer; large = true)
BB.set_bounds!(mip, mip.int_vars, zeros(N), ones(N))
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.637280
@test round(obj, digits=6) == 62.637280
end
@testset "varbranch" begin
@@ -82,8 +85,8 @@ function bb_run(optimizer_name, optimizer; large = true)
BB.StrongBranching(),
BB.ReliabilityBranching(),
BB.HybridBranching(),
BB.StrongBranching(aggregation = :min),
BB.ReliabilityBranching(aggregation = :min, collect = true),
BB.StrongBranching(aggregation=:min),
BB.ReliabilityBranching(aggregation=:min, collect=true),
]
h5 = H5File("$FIXTURES/$instance.h5")
mip_obj_bound = h5.get_scalar("mip_obj_bound")
@@ -104,13 +107,13 @@ function bb_run(optimizer_name, optimizer; large = true)
end
@testset "collect" begin
rule = BB.ReliabilityBranching(collect = true)
rule = BB.ReliabilityBranching(collect=true)
BB.collect!(
optimizer,
"$FIXTURES/bell5.mps.gz",
node_limit = 100,
print_interval = 10,
branch_rule = rule,
node_limit=100,
print_interval=10,
branch_rule=rule,
)
n_sb = rule.stats.num_strong_branch_calls
h5 = H5File("$FIXTURES/bell5.h5")
@@ -128,3 +131,67 @@ function test_bb()
@time bb_run("HiGHS", optimizer_with_attributes(HiGHS.Optimizer))
# @time bb_run("CPLEX", optimizer_with_attributes(CPLEX.Optimizer, "CPXPARAM_Threads" => 1))
end
function test_bb_replay()
rule_sb = BB.StrongBranching()
rule_rb = BB.ReliabilityBranching()
optimizer = optimizer_with_attributes(HiGHS.Optimizer)
filenames = [replace(f, ".h5" => "") for f in glob("test/fixtures/stab/*.h5")]
results_filename = "tmp.csv"
lk = ReentrantLock()
results = []
function push_result(r)
lock(lk) do
push!(results, r)
df = DataFrame()
for row in results
push!(df, row, cols=:union)
end
CSV.write(results_filename, df)
end
end
function solve(filename; replay=nothing, skip=false, rule)
has_replay = (replay !== nothing)
h5 = H5File("$filename.h5", "r")
mip_obj_bound = h5.get_scalar("mip_obj_bound")
@show filename
@show has_replay
h5.file.close()
mip = BB.init(optimizer)
BB.read!(mip, "$filename.mps.gz")
time_solve = @elapsed begin
pool, replay = BB.solve!(
mip,
initial_primal_bound=mip_obj_bound,
print_interval=100,
node_limit=1_000,
branch_rule=rule,
replay=replay,
)
end
if !skip
push_result(
Dict(
"Filename" => filename,
"Replay?" => has_replay,
"Solve time (s)" => time_solve,
"Relative MIP gap (%)" => round(pool.gap * 100, digits=3)
)
)
end
return replay
end
# Solve reference instance
replay = solve(filenames[1], skip=true, rule=rule_sb)
# Solve perturbations
for i in 2:6
solve(filenames[i], rule=rule_rb, replay=nothing)
solve(filenames[i], rule=rule_rb, replay=deepcopy(replay))
end
return
end


@@ -24,7 +24,6 @@ include("Cuts/tableau/test_gmi_dual.jl")
include("problems/test_setcover.jl")
include("problems/test_stab.jl")
include("problems/test_tsp.jl")
include("problems/test_maxcut.jl")
include("solvers/test_jump.jl")
include("test_io.jl")
include("test_usage.jl")
@@ -38,7 +37,6 @@ function runtests()
test_problems_setcover()
test_problems_stab()
test_problems_tsp()
test_problems_maxcut()
test_solvers_jump()
test_usage()
test_cuts()


@@ -1,54 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2025, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using PyCall
function test_problems_maxcut()
np = pyimport("numpy")
random = pyimport("random")
scipy_stats = pyimport("scipy.stats")
randint = scipy_stats.randint
uniform = scipy_stats.uniform
# Set random seed
random.seed(42)
np.random.seed(42)
# Build random instance
data = MaxCutGenerator(
n = randint(low = 10, high = 11),
p = uniform(loc = 0.5, scale = 0.0),
fix_graph = false,
).generate(
1,
)[1]
# Build model
model = build_maxcut_model_jump(data, optimizer = SCIP.Optimizer)
# Check static features
h5 = H5File(tempname(), "w")
model.extract_after_load(h5)
obj_linear = h5.get_array("static_var_obj_coeffs")
obj_quad = h5.get_array("static_var_obj_coeffs_quad")
@test obj_linear == [3.0, 1.0, 3.0, 1.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0]
@test obj_quad == [
0.0 0.0 -1.0 1.0 -1.0 0.0 0.0 0.0 -1.0 -1.0
0.0 0.0 1.0 -1.0 0.0 -1.0 -1.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 -1.0 -1.0
0.0 0.0 0.0 0.0 0.0 -1.0 1.0 -1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 -1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
]
# Check optimal solution
model.optimize()
model.extract_after_mip(h5)
@test h5.get_scalar("mip_obj_value") == -4
h5.close()
end