Re-add BB module

master
Alinson S. Xavier 3 years ago
parent dabcfef00f
commit db6456dbaa
Signed by: isoron
GPG Key ID: 0DA8E4B9E1109DCA

@@ -6,12 +6,17 @@ version = "0.3.0"
 [deps]
 CPLEX = "a076750e-1247-5638-91d2-ce28b192dca0"
 Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d"
+DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
 HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
 HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Requires = "ae029012-a4dd-5104-9daa-d747884805df"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 
 [compat]
 JuMP = "1"

@@ -0,0 +1,29 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
module BB
using Requires
frac(x) = x - floor(x)
include("structs.jl")
include("collect.jl")
include("nodepool.jl")
include("optimize.jl")
include("log.jl")
include("lp.jl")
include("varbranch/hybrid.jl")
include("varbranch/infeasibility.jl")
include("varbranch/pseudocost.jl")
include("varbranch/random.jl")
include("varbranch/reliability.jl")
include("varbranch/strong.jl")
function __init__()
@require CPLEX = "a076750e-1247-5638-91d2-ce28b192dca0" include("cplex.jl")
end
end # module
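The `Requires.@require` hook above keeps CPLEX optional: the specialized probing code in `cplex.jl` is compiled only when the user's session actually loads CPLEX. A minimal sketch of the effect:

using MIPLearn.BB  # BB works with any supported MOI solver out of the box
using CPLEX        # triggers the @require hook, activating the CPLEX-specific _probe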

@@ -0,0 +1,63 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Printf
using Base.Threads
import Base.Threads: @threads, nthreads, threadid
import ..H5File
function collect!(
optimizer,
filename::String;
time_limit::Float64 = Inf,
node_limit::Int = typemax(Int),
gap_limit::Float64 = 1e-4,
print_interval::Int = 5,
branch_rule::VariableBranchingRule = ReliabilityBranching(collect = true),
enable_plunging = true,
)::NodePool
model = read_from_file(filename)
mip = init(optimizer)
load!(mip, model)
h5 = H5File(replace(filename, ".mps.gz" => ".h5"), "r")
primal_bound = h5.get_scalar("mip_upper_bound")
if primal_bound === nothing
primal_bound = h5.get_scalar("mip_obj_value")
end
h5.file.close()
pool = solve!(
mip;
initial_primal_bound = primal_bound,
time_limit,
node_limit,
gap_limit,
print_interval,
branch_rule,
enable_plunging,
)
h5 = H5File(replace(filename, ".mps.gz" => ".h5"))
pseudocost_up = [NaN for _ = 1:mip.nvars]
pseudocost_down = [NaN for _ = 1:mip.nvars]
priorities = [0.0 for _ = 1:mip.nvars]
for (var, var_hist) in pool.var_history
pseudocost_up[var.index] = var_hist.pseudocost_up
pseudocost_down[var.index] = var_hist.pseudocost_down
x = mean(var_hist.fractional_values)
f_up = x - floor(x)
f_down = ceil(x) - x
priorities[var.index] =
var_hist.pseudocost_up * f_up * var_hist.pseudocost_down * f_down
end
h5.put_array("bb_var_pseudocost_up", pseudocost_up)
h5.put_array("bb_var_pseudocost_down", pseudocost_down)
h5.put_array("bb_var_priority", priorities)
collect!(branch_rule, h5)
h5.file.close()
return pool
end
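A usage sketch for `collect!` (solver choice and instance path are hypothetical). Note that `collect!` expects a companion `.h5` file next to the `.mps.gz` instance, since it reads the primal bound from it before solving; the pseudocosts, priorities, and any strong-branching training data are written back into the same file:

using HiGHS, JuMP
using MIPLearn.BB
BB.collect!(
    optimizer_with_attributes(HiGHS.Optimizer),
    "instances/bell5.mps.gz",
    node_limit = 100,
    branch_rule = BB.ReliabilityBranching(collect = true),
)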

@@ -0,0 +1,33 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using CPLEX
function _probe(
mip::MIP,
cpx::CPLEX.Optimizer,
var::Variable,
::Float64,
::Float64,
::Float64,
itlim::Int,
)::Tuple{Float64,Float64}
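# CPXstrongbranch expects zero-based column indices and an optimal LP basis,
# hence the CPXlpopt call below; downobj/upobj receive the objective values
# of the down- and up-branch LPs, each limited to itlim simplex iterations.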
indices = [var.index - Cint(1)]
downobj, upobj, cnt = [0.0], [0.0], 1
status = CPXlpopt(cpx.env, cpx.lp)
status == 0 || error("CPXlpopt failed ($status)")
status = CPXstrongbranch(cpx.env, cpx.lp, indices, cnt, downobj, upobj, itlim)
status == 0 || error("CPXstrongbranch failed ($status)")
return upobj[1] * mip.sense, downobj[1] * mip.sense
end
function _relax_integrality!(cpx::CPLEX.Optimizer)::Nothing
status = CPXchgprobtype(cpx.env, cpx.lp, CPLEX.CPXPROB_LP)
status == 0 || error("CPXchgprobtype failed ($status)")
return
end

@@ -0,0 +1,68 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Printf
function print_progress_header()
@printf(
"%8s %9s %9s %13s %13s %9s %6s %13s %6s %-24s %9s %9s %6s %6s",
"time",
"processed",
"pending",
"primal-bound",
"dual-bound",
"gap",
"node",
"obj",
"parent",
"branch-var",
"branch-lb",
"branch-ub",
"depth",
"iinfes"
)
println()
flush(stdout)
end
function print_progress(
pool::NodePool,
node::Node;
time_elapsed::Float64,
print_interval::Int,
primal_update::Bool,
)::Nothing
if (pool.processed % print_interval == 0) || isempty(pool.pending) || primal_update
if isempty(node.branch_vars)
branch_var_name = "---"
branch_lb = "---"
branch_ub = "---"
else
branch_var_name = name(node.mip, last(node.branch_vars))
L = min(24, length(branch_var_name))
branch_var_name = branch_var_name[1:L]
branch_lb = @sprintf("%9.2f", last(node.branch_lb))
branch_ub = @sprintf("%9.2f", last(node.branch_ub))
end
@printf(
"%8.2f %9d %9d %13.6e %13.6e %9.2e %6d %13.6e %6s %-24s %9s %9s %6d %6d",
time_elapsed,
pool.processed,
length(pool.processing) + length(pool.pending),
pool.primal_bound * node.mip.sense,
pool.dual_bound * node.mip.sense,
pool.gap,
node.index,
node.obj * node.mip.sense,
node.parent === nothing ? "---" : @sprintf("%d", node.parent.index),
branch_var_name,
branch_lb,
branch_ub,
length(node.branch_vars),
length(node.fractional_variables)
)
println()
flush(stdout)
end
end

@@ -0,0 +1,269 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import Base: values, convert
using Base.Threads
import Base.Threads: @threads, nthreads, threadid
using JuMP
using MathOptInterface
const MOI = MathOptInterface
function init(constructor)::MIP
return MIP(
constructor = constructor,
optimizers = Any[nothing for t = 1:nthreads()],
int_vars = Variable[],
int_vars_lb = Float64[],
int_vars_ub = Float64[],
sense = 1.0,
lp_iterations = 0,
nvars = 0,
)
end
function read!(mip::MIP, filename::AbstractString)::Nothing
load!(mip, read_from_file(filename))
return
end
function load!(mip::MIP, prototype::JuMP.Model)
mip.nvars = num_variables(prototype)
_replace_zero_one!(backend(prototype))
_assert_supported(backend(prototype))
mip.int_vars, mip.int_vars_lb, mip.int_vars_ub = _get_int_variables(backend(prototype))
mip.sense = _get_objective_sense(backend(prototype))
_relax_integrality!(backend(prototype))
@threads for t = 1:nthreads()
model = Model(mip.constructor)
MOI.copy_to(model, backend(prototype))
mip.optimizers[t] = backend(model)
set_silent(model)
end
return
end
function _assert_supported(optimizer::MOI.AbstractOptimizer)::Nothing
types = MOI.get(optimizer, MOI.ListOfConstraintTypesPresent())
for (F, S) in types
_assert_supported(F, S)
end
end
function _assert_supported(F::Type, S::Type)::Nothing
if F in [MOI.ScalarAffineFunction{Float64}, MOI.VariableIndex] && S in [
MOI.LessThan{Float64},
MOI.GreaterThan{Float64},
MOI.EqualTo{Float64},
MOI.Interval{Float64},
]
return
end
if F in [MOI.VariableIndex] && S in [MOI.Integer, MOI.ZeroOne]
return
end
error("MOI constraint not supported: $F in $S")
end
function _get_objective_sense(optimizer::MOI.AbstractOptimizer)::Float64
sense = MOI.get(optimizer, MOI.ObjectiveSense())
if sense == MOI.MIN_SENSE
return 1.0
elseif sense == MOI.MAX_SENSE
return -1.0
else
error("objective sense not supported: $sense")
end
end
_interval_index(v::Variable) =
MOI.ConstraintIndex{MOI.VariableIndex,MOI.Interval{Float64}}(v.index)
_lower_bound_index(v::Variable) =
MOI.ConstraintIndex{MOI.VariableIndex,MOI.GreaterThan{Float64}}(v.index)
_upper_bound_index(v::Variable) =
MOI.ConstraintIndex{MOI.VariableIndex,MOI.LessThan{Float64}}(v.index)
function _replace_zero_one!(optimizer::MOI.AbstractOptimizer)::Nothing
constrs_to_delete = MOI.ConstraintIndex[]
funcs = MOI.VariableIndex[]
sets = Union{MOI.Interval,MOI.Integer}[]
for ci in
MOI.get(optimizer, MOI.ListOfConstraintIndices{MOI.VariableIndex,MOI.ZeroOne}())
func = MOI.get(optimizer, MOI.ConstraintFunction(), ci)
var = func.value
push!(constrs_to_delete, ci)
push!(funcs, MOI.VariableIndex(var))
push!(funcs, MOI.VariableIndex(var))
push!(sets, MOI.Interval{Float64}(0.0, 1.0))
push!(sets, MOI.Integer())
end
MOI.delete(optimizer, constrs_to_delete)
MOI.add_constraints(optimizer, funcs, sets)
return
end
function _get_int_variables(
optimizer::MOI.AbstractOptimizer,
)::Tuple{Vector{Variable},Vector{Float64},Vector{Float64}}
vars = Variable[]
lb = Float64[]
ub = Float64[]
for ci in
MOI.get(optimizer, MOI.ListOfConstraintIndices{MOI.VariableIndex,MOI.Integer}())
func = MOI.get(optimizer, MOI.ConstraintFunction(), ci)
var = Variable(func.value)
var_lb, var_ub = -Inf, Inf
if MOI.is_valid(optimizer, _interval_index(var))
constr = MOI.get(optimizer, MOI.ConstraintSet(), _interval_index(var))
var_ub = constr.upper
var_lb = constr.lower
else
# If interval constraint is not found, query individual lower/upper bound
# constraints and replace them by an interval constraint.
if MOI.is_valid(optimizer, _lower_bound_index(var))
constr = MOI.get(optimizer, MOI.ConstraintSet(), _lower_bound_index(var))
var_lb = constr.lower
MOI.delete(optimizer, _lower_bound_index(var))
end
if MOI.is_valid(optimizer, _upper_bound_index(var))
constr = MOI.get(optimizer, MOI.ConstraintSet(), _upper_bound_index(var))
var_ub = constr.upper
MOI.delete(optimizer, _upper_bound_index(var))
end
MOI.add_constraint(optimizer, var, MOI.Interval(var_lb, var_ub))
end
push!(vars, var)
push!(lb, var_lb)
push!(ub, var_ub)
end
return vars, lb, ub
end
function _relax_integrality!(optimizer::MOI.AbstractOptimizer)::Nothing
indices =
MOI.get(optimizer, MOI.ListOfConstraintIndices{MOI.VariableIndex,MOI.Integer}())
MOI.delete(optimizer, indices)
end
"""
solve_relaxation(mip::MIP)::Tuple{Symbol, Float64}
Solve the linear relaxation of `mip` and returns a tuple containing the
solution status (either :Optimal or :Infeasible) and the optimal objective
value. If the problem is infeasible, the optimal value is Inf for minimization
problems and -Inf for maximization problems..
"""
function solve_relaxation!(mip::MIP)::Tuple{Symbol,Float64}
t = threadid()
MOI.optimize!(mip.optimizers[t])
try
mip.lp_iterations += MOI.get(mip.optimizers[t], MOI.SimplexIterations())
catch
# ignore
end
status = MOI.get(mip.optimizers[t], MOI.TerminationStatus())
if status == MOI.OPTIMAL
obj = MOI.get(mip.optimizers[t], MOI.ObjectiveValue())
return :Optimal, obj * mip.sense
elseif status in [MOI.INFEASIBLE, MOI.INFEASIBLE_OR_UNBOUNDED]
return :Infeasible, Inf * mip.sense
end
error("unknown status: $status")
end
"""
values(mip::MIP, vars::Vector{Variable})::Array{Float64}
Returns a vector `vals` which describes the current primal values for the
decision variables `vars`. More specifically, `vals[j]` is the current
primal value of `vars[j]`.
"""
function values(mip::MIP, vars::Vector{Variable})::Array{Float64}
return MOI.get(
mip.optimizers[threadid()],
MOI.VariablePrimal(),
[MOI.VariableIndex(v.index) for v in vars],
)
end
values(mip::MIP) =
values(mip, MOI.get(mip.optimizers[threadid()], MOI.ListOfVariableIndices()))
"""
set_bounds!(mip::MIP,
vars::Vector{Variable},
lb::Array{Float64},
ub::Array{Float64})
Modifies the bounds of the given variables. More specifically, sets the
upper and lower bounds of `vars[j]` to `ub[j]` and `lb[j]`, respectively.
"""
function set_bounds!(
mip::MIP,
vars::Vector{Variable},
lb::Array{Float64},
ub::Array{Float64},
)::Nothing
t = threadid()
for j = 1:length(vars)
MOI.delete(mip.optimizers[t], _interval_index(vars[j]))
MOI.add_constraint(
mip.optimizers[t],
MOI.VariableIndex(vars[j].index),
MOI.Interval(lb[j], ub[j]),
)
end
return
end
"""
name(mip::MIP, var::Variable)::String
Return the name of the decision variable `var`.
"""
function name(mip::MIP, var::Variable)::String
t = threadid()
return MOI.get(mip.optimizers[t], MOI.VariableName(), MOI.VariableIndex(var.index))
end
"""
probe(mip::MIP, var, x, lb, ub, max_iterations)::Tuple{Float64, Float64}
Suppose that the LP relaxation of `mip` has been solved and that `var` holds
a fractional value `x`. This function returns two numbers corresponding,
respectively, to the optimal values of the LP relaxations having the
constraints `ceil(x) <= var <= ub` and `lb <= var <= floor(x)` enforced.
If any branch is infeasible, the optimal value for that branch is Inf for
minimization problems and -Inf for maximization problems.
"""
function probe(
mip::MIP,
var::Variable,
x::Float64,
lb::Float64,
ub::Float64,
max_iterations::Int,
)::Tuple{Float64,Float64}
return _probe(mip, mip.optimizers[threadid()], var, x, lb, ub, max_iterations)
end
function _probe(
mip::MIP,
_,
var::Variable,
x::Float64,
lb::Float64,
ub::Float64,
::Int,
)::Tuple{Float64,Float64}
set_bounds!(mip, [var], [ceil(x)], [ceil(x)])
_, obj_up = solve_relaxation!(mip)
set_bounds!(mip, [var], [floor(x)], [floor(x)])
_, obj_down = solve_relaxation!(mip)
set_bounds!(mip, [var], [lb], [ub])
return obj_up * mip.sense, obj_down * mip.sense
end
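A low-level usage sketch mirroring the test suite (instance path hypothetical; the first integer variable of this instance is binary):

using HiGHS, JuMP
using MIPLearn.BB
mip = BB.init(optimizer_with_attributes(HiGHS.Optimizer))
BB.read!(mip, "fixtures/danoint.mps.gz")
status, obj = BB.solve_relaxation!(mip)
# Estimate both child bounds for branching at fractional value 0.5:
obj_up, obj_down = BB.probe(mip, mip.int_vars[1], 0.5, 0.0, 1.0, 1_000_000)
# Fix the variable to zero, re-solve, then restore its original bounds:
BB.set_bounds!(mip, mip.int_vars[1:1], [0.0], [0.0])
status, obj = BB.solve_relaxation!(mip)
BB.set_bounds!(mip, mip.int_vars[1:1], [0.0], [1.0])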

@@ -0,0 +1,186 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Statistics
using DataStructures
import Base.Threads: threadid
function take(
pool::NodePool;
suggestions::Array{Node} = [],
time_remaining::Float64,
gap_limit::Float64,
node_limit::Int,
)::Union{Symbol,Node}
t = threadid()
lock(pool.lock) do
n_processing = length(pool.processing)
if (
(pool.gap < gap_limit) ||
(n_processing + pool.processed >= node_limit) ||
(time_remaining < 0)
)
return :END
end
if isempty(pool.pending)
if isempty(pool.processing)
return :END
else
return :WAIT
end
else
# If one of the suggested nodes is still pending, return it.
# This is known in the literature as plunging.
for s in suggestions
if s in keys(pool.pending)
delete!(pool.pending, s)
pool.processing[s] = s.obj
return s
end
end
# If all suggestions have already been processed
# or pruned, find another node based on best bound.
node = dequeue!(pool.pending)
pool.processing[node] = node.obj
return node
end
end
end
function offer(
pool::NodePool;
parent_node::Union{Nothing,Node},
child_nodes::Vector{Node},
time_elapsed::Float64 = 0.0,
print_interval::Int = 100,
)::Nothing
lock(pool.lock) do
primal_update = false
# Update node.processing and node.processed
if parent_node !== nothing
pool.processed += 1
delete!(pool.processing, parent_node)
end
# Queue child nodes
for node in child_nodes
if node.status == :Infeasible
continue
end
if node.obj >= pool.primal_bound - 1e-6
continue
end
if isempty(node.fractional_variables)
pool.primal_bound = min(pool.primal_bound, node.obj)
primal_update = true
continue
end
pool.pending[node] = node.obj
end
# Update dual bound
pool.dual_bound = pool.primal_bound
if !isempty(pool.pending)
pool.dual_bound = min(pool.dual_bound, peek(pool.pending)[2])
end
if !isempty(pool.processing)
pool.dual_bound = min(pool.dual_bound, peek(pool.processing)[2])
end
# Update gap
if pool.primal_bound == pool.dual_bound
pool.gap = 0
else
pool.gap = abs((pool.primal_bound - pool.dual_bound) / pool.primal_bound)
end
if parent_node !== nothing
# Update branching variable history
branch_var = child_nodes[1].branch_vars[end]
offset = findfirst(isequal(branch_var), parent_node.fractional_variables)
x = parent_node.fractional_values[offset]
obj_change_up = child_nodes[1].obj - parent_node.obj
obj_change_down = child_nodes[2].obj - parent_node.obj
_update_var_history(
pool = pool,
var = branch_var,
x = x,
obj_change_down = obj_change_down,
obj_change_up = obj_change_up,
)
# Update global history
pool.history.avg_pseudocost_up =
mean(vh.pseudocost_up for vh in values(pool.var_history))
pool.history.avg_pseudocost_down =
mean(vh.pseudocost_down for vh in values(pool.var_history))
end
for node in child_nodes
print_progress(
pool,
node,
time_elapsed = time_elapsed,
print_interval = print_interval,
primal_update = isfinite(node.obj) && isempty(node.fractional_variables),
)
end
end
return
end
function _update_var_history(;
pool::NodePool,
var::Variable,
x::Float64,
obj_change_down::Float64,
obj_change_up::Float64,
)::Nothing
# Create new history entry
if var ∉ keys(pool.var_history)
pool.var_history[var] = VariableHistory()
end
varhist = pool.var_history[var]
# Push fractional value
push!(varhist.fractional_values, x)
# Push objective value changes
push!(varhist.obj_change_up, obj_change_up)
push!(varhist.obj_change_down, obj_change_down)
# Push objective change ratios
f_up = x - floor(x)
f_down = ceil(x) - x
if isfinite(obj_change_up)
push!(varhist.obj_ratio_up, obj_change_up / f_up)
end
if isfinite(obj_change_down)
push!(varhist.obj_ratio_down, obj_change_down / f_down)
end
# Update variable pseudocosts
varhist.pseudocost_up = 0.0
varhist.pseudocost_down = 0.0
if !isempty(varhist.obj_ratio_up)
varhist.pseudocost_up = sum(varhist.obj_ratio_up) / length(varhist.obj_ratio_up)
end
if !isempty(varhist.obj_ratio_down)
varhist.pseudocost_down =
sum(varhist.obj_ratio_down) / length(varhist.obj_ratio_down)
end
return
end
function generate_indices(pool::NodePool, n::Int)::Vector{Int}
lock(pool.lock) do
result = Int[]
for i = 1:n
push!(result, pool.next_index)
pool.next_index += 1
end
return result
end
end
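The pseudocost bookkeeping in `_update_var_history` reduces to a running mean of per-unit objective changes. A worked sketch with made-up numbers:

x = 3.4                                    # fractional value at the parent
f_up, f_down = x - floor(x), ceil(x) - x   # 0.4 and 0.6
obj_change_up, obj_change_down = 1.2, 0.3  # bound change of each child LP
ratio_up = obj_change_up / f_up            # 3.0, pushed to obj_ratio_up
ratio_down = obj_change_down / f_down      # 0.5, pushed to obj_ratio_down
# pseudocost_up and pseudocost_down are the means of these ratios over all
# branchings on the same variable.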

@@ -0,0 +1,169 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Printf
using Base.Threads
import Base.Threads: @threads, nthreads, threadid
import ..H5File
function solve!(
mip::MIP;
time_limit::Float64 = Inf,
node_limit::Int = typemax(Int),
gap_limit::Float64 = 1e-4,
print_interval::Int = 5,
initial_primal_bound::Float64 = Inf,
branch_rule::VariableBranchingRule = ReliabilityBranching(),
enable_plunging = true,
)::NodePool
time_initial = time()
pool = NodePool(mip = mip)
pool.primal_bound = initial_primal_bound
root_node = _create_node(mip)
if isempty(root_node.fractional_variables)
println("root relaxation is integer feasible")
pool.dual_bound = root_node.obj
pool.primal_bound = root_node.obj
return pool
else
print_progress_header()
end
offer(
pool,
parent_node = nothing,
child_nodes = [root_node],
print_interval = print_interval,
)
@threads for t = 1:nthreads()
child_one, child_zero, suggestions = nothing, nothing, Node[]
while true
time_elapsed = time() - time_initial
if enable_plunging && (child_one !== nothing)
suggestions = Node[child_one, child_zero]
end
node = take(
pool,
suggestions = suggestions,
time_remaining = time_limit - time_elapsed,
node_limit = node_limit,
gap_limit = gap_limit,
)
if node == :END
break
elseif node == :WAIT
sleep(0.1)
continue
else
# Assert node is feasible
_set_node_bounds(node)
status, _ = solve_relaxation!(mip)
@assert status == :Optimal
_unset_node_bounds(node)
# Find branching variable
ids = generate_indices(pool, 2)
branch_var = find_branching_var(branch_rule, node, pool)
# Find current variable lower and upper bounds
offset = findfirst(isequal(branch_var), mip.int_vars)
var_lb = mip.int_vars_lb[offset]
var_ub = mip.int_vars_ub[offset]
for (offset, v) in enumerate(node.branch_vars)
if v == branch_var
var_lb = max(var_lb, node.branch_lb[offset])
var_ub = min(var_ub, node.branch_ub[offset])
end
end
# Query current fractional value
offset = findfirst(isequal(branch_var), node.fractional_variables)
var_value = node.fractional_values[offset]
child_zero = _create_node(
mip,
index = ids[2],
parent = node,
branch_var = branch_var,
branch_var_lb = var_lb,
branch_var_ub = floor(var_value),
)
child_one = _create_node(
mip,
index = ids[1],
parent = node,
branch_var = branch_var,
branch_var_lb = ceil(var_value),
branch_var_ub = var_ub,
)
offer(
pool,
parent_node = node,
child_nodes = [child_one, child_zero],
time_elapsed = time_elapsed,
print_interval = print_interval,
)
end
end
end
return pool
end
function _create_node(
mip;
index::Int = 0,
parent::Union{Nothing,Node} = nothing,
branch_var::Union{Nothing,Variable} = nothing,
branch_var_lb::Union{Nothing,Float64} = nothing,
branch_var_ub::Union{Nothing,Float64} = nothing,
)::Node
if parent === nothing
branch_vars = Variable[]
branch_lb = Float64[]
branch_ub = Float64[]
depth = 1
else
branch_vars = [parent.branch_vars; branch_var]
branch_lb = [parent.branch_lb; branch_var_lb]
branch_ub = [parent.branch_ub; branch_var_ub]
depth = parent.depth + 1
end
set_bounds!(mip, branch_vars, branch_lb, branch_ub)
status, obj = solve_relaxation!(mip)
if status == :Optimal
vals = values(mip, mip.int_vars)
fractional_indices = [
j for j in 1:length(mip.int_vars) if 1e-6 < vals[j] - floor(vals[j]) < 1 - 1e-6
]
fractional_values = vals[fractional_indices]
fractional_variables = mip.int_vars[fractional_indices]
else
fractional_variables = Variable[]
fractional_values = Float64[]
end
set_bounds!(mip, mip.int_vars, mip.int_vars_lb, mip.int_vars_ub)
return Node(
mip,
index,
depth,
obj,
status,
branch_vars,
branch_lb,
branch_ub,
fractional_variables,
fractional_values,
parent,
)
end
function _set_node_bounds(node::Node)
set_bounds!(node.mip, node.branch_vars, node.branch_lb, node.branch_ub)
end
function _unset_node_bounds(node::Node)
set_bounds!(node.mip, node.mip.int_vars, node.mip.int_vars_lb, node.mip.int_vars_ub)
end
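An end-to-end sketch of `solve!`, mirroring the tests (solver and instance path hypothetical):

using HiGHS, JuMP
using MIPLearn.BB
mip = BB.init(optimizer_with_attributes(HiGHS.Optimizer))
BB.read!(mip, "fixtures/vpm2.mps.gz")
pool = BB.solve!(
    mip,
    node_limit = 100,
    branch_rule = BB.PseudocostBranching(),
)
@show pool.primal_bound pool.dual_bound pool.gap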

@@ -0,0 +1,74 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using DataStructures
abstract type VariableBranchingRule end
struct Variable
index::Any
end
Base.@kwdef mutable struct MIP
constructor::Any
optimizers::Vector
int_vars::Vector{Variable}
int_vars_lb::Vector{Float64}
int_vars_ub::Vector{Float64}
sense::Float64
lp_iterations::Int64
nvars::Int
end
struct Node
mip::MIP
index::Int
depth::Int
obj::Float64
status::Symbol
branch_vars::Array{Variable}
branch_lb::Array{Float64}
branch_ub::Array{Float64}
fractional_variables::Array{Variable}
fractional_values::Array{Float64}
parent::Union{Nothing,Node}
end
Base.@kwdef mutable struct History
avg_pseudocost_up::Float64 = 1.0
avg_pseudocost_down::Float64 = 1.0
end
mutable struct VariableHistory
fractional_values::Array{Float64}
obj_change_up::Array{Float64}
obj_change_down::Array{Float64}
obj_ratio_up::Array{Float64}
obj_ratio_down::Array{Float64}
pseudocost_up::Float64
pseudocost_down::Float64
VariableHistory() = new(
Float64[], # fractional_values
Float64[], # obj_change_up
Float64[], # obj_change_down
Float64[], # obj_ratio_up
Float64[], # obj_ratio_down
0.0, # pseudocost_up
0.0, # pseudocost_down
)
end
Base.@kwdef mutable struct NodePool
mip::MIP
pending::PriorityQueue{Node,Float64} = PriorityQueue{Node,Float64}()
processing::PriorityQueue{Node,Float64} = PriorityQueue{Node,Float64}()
processed::Int = 0
next_index::Int = 1
lock::ReentrantLock = ReentrantLock()
primal_bound::Float64 = Inf
dual_bound::Float64 = Inf
gap::Float64 = Inf
history::History = History()
var_history::Dict{Variable,VariableHistory} = Dict()
end
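Because `NodePool` is a `Base.@kwdef` struct, every field except `mip` has a default, so `solve!` can create a fresh pool with a single keyword:

pool = NodePool(mip = mip)  # bounds start at Inf, queues start empty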

@@ -0,0 +1,31 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""
HybridBranching(depth_cutoff::Int,
shallow_rule::VariableBranchingRule,
deep_rule::VariableBranchingRule)
Branching strategy that switches between two variable branching strategies,
according to the depth of the node.
More specifically, if `node.depth <= depth_cutoff`, then `shallow_rule` is
applied. Otherwise, `deep_rule` is applied.
"""
mutable struct HybridBranching <: VariableBranchingRule
depth_cutoff::Int
shallow_rule::VariableBranchingRule
deep_rule::VariableBranchingRule
end
HybridBranching(depth_cutoff::Int = 10) =
HybridBranching(depth_cutoff, StrongBranching(), PseudocostBranching())
function find_branching_var(rule::HybridBranching, node::Node, pool::NodePool)::Variable
if node.depth <= rule.depth_cutoff
return find_branching_var(rule.shallow_rule, node, pool)
else
return find_branching_var(rule.deep_rule, node, pool)
end
end
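For example, a sketch that strong-branches near the root and switches to a cheaper rule deeper in the tree:

rule = HybridBranching(
    5,                         # depth_cutoff
    StrongBranching(),         # shallow_rule, for node.depth <= 5
    MostInfeasibleBranching(), # deep_rule, below that
)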

@@ -0,0 +1,54 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""
FirstInfeasibleBranching()
Branching rule that always selects the first fractional variable.
"""
struct FirstInfeasibleBranching <: VariableBranchingRule end
function find_branching_var(
rule::FirstInfeasibleBranching,
node::Node,
pool::NodePool,
)::Variable
return node.fractional_variables[1]
end
"""
LeastInfeasibleBranching()
Branching strategy that selects the fractional variable whose value is closest
to an integral value.
"""
struct LeastInfeasibleBranching <: VariableBranchingRule end
function find_branching_var(
rule::LeastInfeasibleBranching,
node::Node,
pool::NodePool,
)::Variable
scores = [max(v - floor(v), ceil(v) - v) for v in node.fractional_values]
_, max_offset = findmax(scores)
return node.fractional_variables[max_offset]
end
"""
MostInfeasibleBranching()
Branching strategy that selects the fractional variable whose value is closest
to 1/2.
"""
struct MostInfeasibleBranching <: VariableBranchingRule end
function find_branching_var(
rule::MostInfeasibleBranching,
node::Node,
pool::NodePool,
)::Variable
scores = [min(v - floor(v), ceil(v) - v) for v in node.fractional_values]
_, max_offset = findmax(scores)
return node.fractional_variables[max_offset]
end
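A worked sketch showing how the two scores differ on the same fractional values:

vals = [0.05, 0.48, 0.93]
# LeastInfeasibleBranching scores max(f, 1 - f) = [0.95, 0.52, 0.93] and
# branches on the variable at 0.05, the one closest to an integer.
# MostInfeasibleBranching scores min(f, 1 - f) = [0.05, 0.48, 0.07] and
# branches on the variable at 0.48, the one closest to 1/2.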

@@ -0,0 +1,45 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""
PseudocostBranching()
Branching strategy that uses historical changes in objective value to estimate
strong branching scores at lower computational cost.
"""
struct PseudocostBranching <: VariableBranchingRule end
function find_branching_var(rule::PseudocostBranching, node::Node, pool::NodePool)::Variable
scores = [
_pseudocost_score(
node,
pool,
node.fractional_variables[j],
node.fractional_values[j],
) for j = 1:length(node.fractional_variables)
]
_, max_offset = findmax(scores)
return node.fractional_variables[max_offset]
end
function _pseudocost_score(
node::Node,
pool::NodePool,
var::Variable,
x::Float64,
)::Tuple{Float64,Int}
f_up = x - floor(x)
f_down = ceil(x) - x
pseudo_up = pool.history.avg_pseudocost_up * f_up
pseudo_down = pool.history.avg_pseudocost_down * f_down
if var in keys(pool.var_history)
if isfinite(pool.var_history[var].pseudocost_up)
pseudo_up = pool.var_history[var].pseudocost_up * f_up
end
if isfinite(pool.var_history[var].pseudocost_down)
pseudo_down = pool.var_history[var].pseudocost_down * f_down
end
end
return (pseudo_up * f_up * pseudo_down * f_down, var.index)
end
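A numeric sketch of `_pseudocost_score` as implemented above (pseudocost values made up):

x = 2.3
f_up, f_down = x - floor(x), ceil(x) - x  # 0.3 and 0.7
pseudo_up = 3.0 * f_up                    # pseudocost_up = 3.0 -> 0.9
pseudo_down = 0.5 * f_down                # pseudocost_down = 0.5 -> 0.35
score = pseudo_up * f_up * pseudo_down * f_down  # ≈ 0.066
# The (score, var.index) tuple makes ties break deterministically by index.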

@@ -0,0 +1,17 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Random
"""
RandomBranching()
Branching strategy that picks a fractional variable randomly.
"""
struct RandomBranching <: VariableBranchingRule end
function find_branching_var(rule::RandomBranching, node::Node, pool::NodePool)::Variable
return shuffle(node.fractional_variables)[1]
end

@@ -0,0 +1,191 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import ..to_str_array
Base.@kwdef mutable struct ReliabilityBranchingStats
branched_count::Vector{Int} = []
num_strong_branch_calls::Int = 0
score_var_names::Vector{String} = []
score_features::Vector{Vector{Float32}} = []
score_targets::Vector{Float32} = []
end
"""
ReliabilityBranching
Branching strategy that uses pseudocosts when enough observations are
available to accurately estimate strong branching scores, and runs the
actual strong branching routine otherwise.
"""
Base.@kwdef mutable struct ReliabilityBranching <: VariableBranchingRule
min_samples::Int = 8
max_sb_calls::Int = 100
look_ahead::Int = 10
side_effect::Bool = true
max_iterations::Int = 1_000_000
aggregation::Symbol = :prod
stats::ReliabilityBranchingStats = ReliabilityBranchingStats()
collect::Bool = false
end
function _strong_branch_score(;
node::Node,
pool::NodePool,
var::Variable,
x::Float64,
side_effect::Bool,
max_iterations::Int,
aggregation::Symbol,
)::Tuple{Float64,Int}
# Find current variable lower and upper bounds
offset = findfirst(isequal(var), node.mip.int_vars)
var_lb = node.mip.int_vars_lb[offset]
var_ub = node.mip.int_vars_ub[offset]
for (offset, v) in enumerate(node.branch_vars)
if v == var
var_lb = max(var_lb, node.branch_lb[offset])
var_ub = min(var_ub, node.branch_ub[offset])
end
end
obj_up, obj_down = probe(node.mip, var, x, var_lb, var_ub, max_iterations)
obj_change_up = obj_up - node.obj
obj_change_down = obj_down - node.obj
if side_effect
_update_var_history(
pool = pool,
var = var,
x = x,
obj_change_down = obj_change_down,
obj_change_up = obj_change_up,
)
end
if aggregation == :prod
return (obj_change_up * obj_change_down, var.index)
elseif aggregation == :min
sense = node.mip.sense
return (min(sense * obj_up, sense * obj_down), var.index)
else
error("Unknown aggregation: $aggregation")
end
end
function find_branching_var(
rule::ReliabilityBranching,
node::Node,
pool::NodePool,
)::Variable
stats = rule.stats
# Initialize statistics
if isempty(stats.branched_count)
stats.branched_count = zeros(node.mip.nvars)
end
# Sort variables by pseudocost score
nfrac = length(node.fractional_variables)
pseudocost_scores = [
_pseudocost_score(
node,
pool,
node.fractional_variables[j],
node.fractional_values[j],
) for j = 1:nfrac
]
σ = sortperm(pseudocost_scores, rev = true)
sorted_vars = node.fractional_variables[σ]
if rule.collect
# Compute dynamic features for all fractional variables
features = []
for (i, var) in enumerate(sorted_vars)
branched_count = stats.branched_count[var.index]
branched_count_rel = 0.0
branched_count_sum = sum(stats.branched_count)
if branched_count_sum > 0
branched_count_rel = branched_count / branched_count_sum
end
push!(
features,
Float32[
nfrac,
node.fractional_values[σ[i]],
node.depth,
pseudocost_scores[σ[i]][1],
branched_count,
branched_count_rel,
],
)
end
end
_set_node_bounds(node)
no_improv_count, n_sb_calls = 0, 0
max_score, max_var = (-Inf, -Inf), sorted_vars[1]
for (i, var) in enumerate(sorted_vars)
# Decide whether to use strong branching
use_strong_branch = true
if n_sb_calls >= rule.max_sb_calls
use_strong_branch = false
else
if var in keys(pool.var_history)
varhist = pool.var_history[var]
hlength = min(length(varhist.obj_ratio_up), length(varhist.obj_ratio_down))
if hlength >= rule.min_samples
use_strong_branch = false
end
end
end
if use_strong_branch
# Compute strong branching score
n_sb_calls += 1
score = _strong_branch_score(
node = node,
pool = pool,
var = var,
x = node.fractional_values[σ[i]],
side_effect = rule.side_effect,
max_iterations = rule.max_iterations,
aggregation = rule.aggregation,
)
if rule.collect
# Store training data
push!(stats.score_var_names, name(node.mip, var))
push!(stats.score_features, features[i])
push!(stats.score_targets, score[1])
end
else
score = pseudocost_scores[σ[i]]
end
if score > max_score
max_score, max_var = score, var
no_improv_count = 0
else
no_improv_count += 1
end
no_improv_count <= rule.look_ahead || break
end
_unset_node_bounds(node)
# Update statistics
stats.branched_count[max_var.index] += 1
stats.num_strong_branch_calls += n_sb_calls
return max_var
end
function collect!(rule::ReliabilityBranching, h5)
if rule.stats.num_strong_branch_calls == 0
return
end
h5.put_array("bb_score_var_names", to_str_array(rule.stats.score_var_names))
h5.put_array("bb_score_features", vcat(rule.stats.score_features'...))
h5.put_array("bb_score_targets", rule.stats.score_targets)
end
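A configuration sketch; all keyword names come from the struct above:

rule = ReliabilityBranching(
    min_samples = 4,    # pseudocosts trusted after 4 observations per side
    max_sb_calls = 50,  # cap on strong-branching probes per node
    look_ahead = 5,     # stop after 5 candidates without score improvement
    aggregation = :min, # use the worse child bound instead of the product
    collect = true,     # record features/targets for training
)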

@@ -0,0 +1,32 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Random
"""
StrongBranching(look_ahead::Int, max_calls::Int)
Branching strategy that selects a subset of fractional variables
as candidates (according to pseudocosts), then solves two linear
programming problems for each candidate.
"""
Base.@kwdef struct StrongBranching <: VariableBranchingRule
look_ahead::Int = 10
max_calls::Int = 100
side_effect::Bool = true
max_iterations::Int = 1_000_000
aggregation::Symbol = :prod
end
function find_branching_var(rule::StrongBranching, node::Node, pool::NodePool)::Variable
rb_rule = ReliabilityBranching(
min_samples = typemax(Int),
max_sb_calls = rule.max_calls,
look_ahead = rule.look_ahead,
side_effect = rule.side_effect,
max_iterations = rule.max_iterations,
aggregation = rule.aggregation,
)
return find_branching_var(rb_rule, node, pool)
end
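Since the reliability threshold is set to `typemax(Int)`, pseudocosts are never deemed reliable and every candidate is probed, which is exactly classical full strong branching:

rule = StrongBranching(look_ahead = 10, max_calls = 100)  # the defaults, spelled out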

@@ -7,8 +7,6 @@ module MIPLearn
 using PyCall
 using SparseArrays
-include("Cuts/BlackBox/cplex.jl")
 include("collectors.jl")
 include("components.jl")
 include("extractors.jl")
@@ -27,4 +25,7 @@ function __init__()
 __init_solvers_learning__()
 end
+include("BB/BB.jl")
+include("Cuts/BlackBox/cplex.jl")
 end # module

@@ -4,6 +4,8 @@ authors = ["Alinson S. Xavier <git@axavier.org>"]
 version = "0.1.0"
 [deps]
+CPLEX = "a076750e-1247-5638-91d2-ce28b192dca0"
+Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d"
 Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
 HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
 HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"

Binary file not shown.

@@ -0,0 +1,134 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Clp
using CPLEX
using HiGHS
using JuMP
using Test
using MIPLearn.BB
using MIPLearn
basepath = @__DIR__
function bb_run(optimizer_name, optimizer; large = true)
@testset "Solve ($optimizer_name)" begin
@testset "interface" begin
filename = "$FIXTURES/danoint.mps.gz"
mip = BB.init(optimizer)
BB.read!(mip, filename)
@test mip.sense == 1.0
@test length(mip.int_vars) == 56
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.637280
@test BB.name(mip, mip.int_vars[1]) == "xab"
@test BB.name(mip, mip.int_vars[2]) == "xac"
@test BB.name(mip, mip.int_vars[3]) == "xad"
@test mip.int_vars_lb[1] == 0.0
@test mip.int_vars_ub[1] == 1.0
vals = BB.values(mip, mip.int_vars)
@test round(vals[1], digits = 6) == 0.046933
@test round(vals[2], digits = 6) == 0.000841
@test round(vals[3], digits = 6) == 0.248696
# Probe (up and down are feasible)
probe_up, probe_down = BB.probe(mip, mip.int_vars[1], 0.5, 0.0, 1.0, 1_000_000)
@test round(probe_down, digits = 6) == 62.690000
@test round(probe_up, digits = 6) == 62.714100
# Fix one variable to zero
BB.set_bounds!(mip, mip.int_vars[1:1], [0.0], [0.0])
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.690000
# Fix one variable to one and another variable to zero
BB.set_bounds!(mip, mip.int_vars[1:2], [1.0, 0.0], [1.0, 0.0])
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.714777
# Fix all binary variables to one, making problem infeasible
N = length(mip.int_vars)
BB.set_bounds!(mip, mip.int_vars, ones(N), ones(N))
status, obj = BB.solve_relaxation!(mip)
@test status == :Infeasible
@test obj == Inf
# Restore original problem
N = length(mip.int_vars)
BB.set_bounds!(mip, mip.int_vars, zeros(N), ones(N))
status, obj = BB.solve_relaxation!(mip)
@test status == :Optimal
@test round(obj, digits = 6) == 62.637280
end
@testset "varbranch" begin
for instance in ["bell5", "vpm2"]
for branch_rule in [
BB.RandomBranching(),
BB.FirstInfeasibleBranching(),
BB.LeastInfeasibleBranching(),
BB.MostInfeasibleBranching(),
BB.PseudocostBranching(),
BB.StrongBranching(),
BB.ReliabilityBranching(),
BB.HybridBranching(),
BB.StrongBranching(aggregation = :min),
BB.ReliabilityBranching(aggregation = :min, collect = true),
]
h5 = H5File("$FIXTURES/$instance.h5")
mip_lower_bound = h5.get_scalar("mip_lower_bound")
mip_upper_bound = h5.get_scalar("mip_upper_bound")
mip_sense = h5.get_scalar("mip_sense")
mip_primal_bound =
mip_sense == "min" ? mip_upper_bound : mip_lower_bound
h5.file.close()
mip = BB.init(optimizer)
BB.read!(mip, "$FIXTURES/$instance.mps.gz")
@info optimizer_name, branch_rule, instance
@time BB.solve!(
mip,
initial_primal_bound = mip_primal_bound,
print_interval = 10,
node_limit = 100,
branch_rule = branch_rule,
)
end
end
end
@testset "collect" begin
rule = BB.ReliabilityBranching(collect = true)
BB.collect!(
optimizer,
"$FIXTURES/bell5.mps.gz",
node_limit = 100,
print_interval = 10,
branch_rule = rule,
)
n_sb = rule.stats.num_strong_branch_calls
h5 = H5File("$FIXTURES/bell5.h5")
@test size(h5.get_array("bb_var_pseudocost_up")) == (104,)
@test size(h5.get_array("bb_score_var_names")) == (n_sb,)
@test size(h5.get_array("bb_score_features")) == (n_sb, 6)
@test size(h5.get_array("bb_score_targets")) == (n_sb,)
h5.file.close()
end
end
end
function test_bb()
@time bb_run("Clp", optimizer_with_attributes(Clp.Optimizer))
@time bb_run("HiGHS", optimizer_with_attributes(HiGHS.Optimizer))
@time bb_run("CPLEX", optimizer_with_attributes(CPLEX.Optimizer, "CPXPARAM_Threads" => 1))
end

@@ -11,6 +11,7 @@ FIXTURES = "$BASEDIR/../fixtures"
 include("fixtures.jl")
+include("BB/test_bb.jl")
 include("Cuts/BlackBox/test_cplex.jl")
 include("problems/test_setcover.jl")
 include("test_io.jl")
@@ -19,6 +20,9 @@ include("test_usage.jl")
 function runtests()
 @testset "MIPLearn" begin
+@testset "BB" begin
+test_bb()
+end
 test_cuts_blackbox_cplex()
 test_io()
 test_problems_setcover()
