From be0cd98e9d7b6a8865cfcb30e1b269b750cc54b0 Mon Sep 17 00:00:00 2001 From: Alinson S Xavier Date: Wed, 15 Sep 2021 08:29:37 -0500 Subject: [PATCH] Add implementation of textbook branch-and-bound method --- Manifest.toml | 4 +- Project.toml | 3 + src/MIPLearn.jl | 2 + src/bb/BB.jl | 22 ++++ src/bb/log.jl | 79 +++++++++++ src/bb/lp.jl | 212 ++++++++++++++++++++++++++++++ src/bb/nodepool.jl | 188 ++++++++++++++++++++++++++ src/bb/optimize.jl | 125 ++++++++++++++++++ src/bb/structs.jl | 70 ++++++++++ src/bb/varbranch/hybrid.jl | 31 +++++ src/bb/varbranch/infeasibility.jl | 54 ++++++++ src/bb/varbranch/pseudocost.jl | 45 +++++++ src/bb/varbranch/random.jl | 17 +++ src/bb/varbranch/reliability.jl | 78 +++++++++++ src/bb/varbranch/strong.jl | 93 +++++++++++++ test/bb/lp_test.jl | 115 ++++++++++++++++ test/fixtures/danoint.mps.gz | Bin 0 -> 14267 bytes test/fixtures/vpm2.mps.gz | Bin 0 -> 6210 bytes test/runtests.jl | 2 + test/solvers/jump_solver_test.jl | 1 - 20 files changed, 1138 insertions(+), 3 deletions(-) create mode 100644 src/bb/BB.jl create mode 100644 src/bb/log.jl create mode 100644 src/bb/lp.jl create mode 100644 src/bb/nodepool.jl create mode 100644 src/bb/optimize.jl create mode 100644 src/bb/structs.jl create mode 100644 src/bb/varbranch/hybrid.jl create mode 100644 src/bb/varbranch/infeasibility.jl create mode 100644 src/bb/varbranch/pseudocost.jl create mode 100644 src/bb/varbranch/random.jl create mode 100644 src/bb/varbranch/reliability.jl create mode 100644 src/bb/varbranch/strong.jl create mode 100644 test/bb/lp_test.jl create mode 100644 test/fixtures/danoint.mps.gz create mode 100644 test/fixtures/vpm2.mps.gz diff --git a/Manifest.toml b/Manifest.toml index 5aa38b6..058a1a3 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -126,9 +126,9 @@ version = "1.1.1" [[DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "4437b64df1e0adccc3e5d1adbc3ac741095e4677" +git-tree-sha1 = 
"7d9d316f04214f7efdbb6398d545446e246eff02" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.9" +version = "0.18.10" [[DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" diff --git a/Project.toml b/Project.toml index aed7353..bc90f42 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ Cbc = "9961bab8-2fa3-5c5a-9d89-47fab24efd76" Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d" Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d" DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" +DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b" JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" @@ -18,7 +19,9 @@ MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" [compat] diff --git a/src/MIPLearn.jl b/src/MIPLearn.jl index 61d164d..d721aa3 100644 --- a/src/MIPLearn.jl +++ b/src/MIPLearn.jl @@ -33,6 +33,8 @@ include("solvers/macros.jl") include("utils/benchmark.jl") include("utils/parse.jl") +include("bb/BB.jl") + function __init__() copy!(miplearn, pyimport("miplearn")) copy!(traceback, pyimport("traceback")) diff --git a/src/bb/BB.jl b/src/bb/BB.jl new file mode 100644 index 0000000..4c84463 --- /dev/null +++ b/src/bb/BB.jl @@ -0,0 +1,22 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. 
+ +module BB + +frac(x) = x - floor(x) + +include("structs.jl") + +include("nodepool.jl") +include("optimize.jl") +include("log.jl") +include("lp.jl") +include("varbranch/hybrid.jl") +include("varbranch/infeasibility.jl") +include("varbranch/pseudocost.jl") +include("varbranch/random.jl") +include("varbranch/reliability.jl") +include("varbranch/strong.jl") + +end # module diff --git a/src/bb/log.jl b/src/bb/log.jl new file mode 100644 index 0000000..a3cb2f2 --- /dev/null +++ b/src/bb/log.jl @@ -0,0 +1,79 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +using Printf + +function print_progress_header(; detailed_output::Bool) + @printf( + "%8s %9s %9s %13s %13s %13s %9s %8s", + "time", + "visited", + "pending", + "obj", + "primal-bound", + "dual-bound", + "gap", + "lp-iter" + ) + if detailed_output + @printf( + " %6s %6s %-24s %6s %6s %6s", + "node", + "parent", + "branch-var", + "b-val", + "depth", + "iinfes" + ) + end + println() + flush(stdout) +end + +function print_progress( + pool::NodePool, + node::Node; + time_elapsed::Float64, + print_interval::Int, + detailed_output::Bool, + primal_update::Bool, +)::Nothing + prefix = primal_update ? 
"*" : " " + if (pool.processed % print_interval == 0) || isempty(pool.pending) || primal_update + @printf( + "%8.2f %1s%9d %9d %13.6e %13.6e %13.6e %9.2e %8d", + time_elapsed, + prefix, + pool.processed, + length(pool.processing) + length(pool.pending), + node.obj * node.mip.sense, + pool.primal_bound * node.mip.sense, + pool.dual_bound * node.mip.sense, + pool.gap, + pool.mip.lp_iterations, + ) + if detailed_output + if isempty(node.branch_variables) + branch_var_name = "---" + branch_value = "---" + else + branch_var_name = name(node.mip, last(node.branch_variables)) + L = min(24, length(branch_var_name)) + branch_var_name = branch_var_name[1:L] + branch_value = @sprintf("%.2f", last(node.branch_values)) + end + @printf( + " %6d %6s %-24s %6s %6d %6d", + node.index, + node.parent === nothing ? "---" : @sprintf("%d", node.parent.index), + branch_var_name, + branch_value, + length(node.branch_variables), + length(node.fractional_variables) + ) + end + println() + flush(stdout) + end +end diff --git a/src/bb/lp.jl b/src/bb/lp.jl new file mode 100644 index 0000000..7ed1a36 --- /dev/null +++ b/src/bb/lp.jl @@ -0,0 +1,212 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. 
+ +import Base: values, convert +using Base.Threads +import Base.Threads: @threads, nthreads, threadid +using JuMP +using MathOptInterface +const MOI = MathOptInterface + +function init(constructor)::MIP + return MIP(constructor, Any[nothing for t = 1:nthreads()], Variable[], 1.0, 0) +end + +function read!(mip::MIP, filename::AbstractString)::Nothing + @threads for t = 1:nthreads() + model = read_from_file(filename) + mip.optimizers[t] = backend(model) + _replace_zero_one!(mip.optimizers[t]) + if t == 1 + _assert_supported(mip.optimizers[t]) + mip.binary_variables = _get_binary_variables(mip.optimizers[t]) + mip.sense = _get_objective_sense(mip.optimizers[t]) + end + _relax_integrality!(mip.optimizers[t]) + set_optimizer(model, mip.constructor) + set_silent(model) + end + return +end + +function _assert_supported(optimizer::MOI.AbstractOptimizer)::Nothing + types = MOI.get(optimizer, MOI.ListOfConstraints()) + for (F, S) in types + _assert_supported(F, S) + end +end + +function _assert_supported(F::DataType, S::DataType)::Nothing + if F in [MOI.ScalarAffineFunction{Float64}, MOI.SingleVariable] && S in [ + MOI.LessThan{Float64}, + MOI.GreaterThan{Float64}, + MOI.EqualTo{Float64}, + MOI.Interval{Float64}, + ] + return + end + if F in [MOI.SingleVariable] && S in [MOI.Integer, MOI.ZeroOne] + return + end + error("MOI constraint not supported: $F in $S") +end + +function _get_objective_sense(optimizer::MOI.AbstractOptimizer)::Float64 + sense = MOI.get(optimizer, MOI.ObjectiveSense()) + if sense == MOI.MIN_SENSE + return 1.0 + elseif sense == MOI.MAX_SENSE + return -1.0 + else + error("objective sense not supported: $sense") + end +end + +_bounds_constraint(v::Variable) = + MOI.ConstraintIndex{MOI.SingleVariable,MOI.Interval{Float64}}(v.index) + +function _replace_zero_one!(optimizer::MOI.AbstractOptimizer)::Nothing + constrs_to_delete = MOI.ConstraintIndex[] + funcs = MOI.SingleVariable[] + sets = Union{MOI.Interval,MOI.Integer}[] + for ci in + MOI.get(optimizer, 
MOI.ListOfConstraintIndices{MOI.SingleVariable,MOI.ZeroOne}()) + func = MOI.get(optimizer, MOI.ConstraintFunction(), ci) + var = func.variable + push!(constrs_to_delete, ci) + push!(funcs, MOI.SingleVariable(var)) + push!(funcs, MOI.SingleVariable(var)) + push!(sets, MOI.Interval{Float64}(0.0, 1.0)) + push!(sets, MOI.Integer()) + end + MOI.delete(optimizer, constrs_to_delete) + MOI.add_constraints(optimizer, funcs, sets) + return +end + +function _get_binary_variables(optimizer::MOI.AbstractOptimizer)::Vector{Variable} + vars = Variable[] + for ci in + MOI.get(optimizer, MOI.ListOfConstraintIndices{MOI.SingleVariable,MOI.Integer}()) + func = MOI.get(optimizer, MOI.ConstraintFunction(), ci) + var = Variable(func.variable.value) + + MOI.is_valid(optimizer, _bounds_constraint(var)) || + error("$var is not interval-constrained") + interval = MOI.get(optimizer, MOI.ConstraintSet(), _bounds_constraint(var)) + interval.lower == 0.0 || error("$var has lb != 0") + interval.upper == 1.0 || error("$var has ub != 1") + + push!(vars, var) + end + return vars +end + +function _relax_integrality!(optimizer::MOI.AbstractOptimizer)::Nothing + indices = + MOI.get(optimizer, MOI.ListOfConstraintIndices{MOI.SingleVariable,MOI.Integer}()) + MOI.delete(optimizer, indices) +end + +""" + solve_relaxation!(mip::MIP)::Tuple{Symbol, Float64} + +Solve the linear relaxation of `mip` and return a tuple containing the +solution status (either :Optimal or :Infeasible) and the optimal objective +value. If the problem is infeasible, the optimal value is Inf for minimization +problems and -Inf for maximization problems.
+""" +function solve_relaxation!(mip::MIP)::Tuple{Symbol,Float64} + t = threadid() + MOI.optimize!(mip.optimizers[t]) + try + mip.lp_iterations += MOI.get(mip.optimizers[t], MOI.SimplexIterations()) + catch + # ignore + end + status = MOI.get(mip.optimizers[t], MOI.TerminationStatus()) + if status == MOI.OPTIMAL + obj = MOI.get(mip.optimizers[t], MOI.ObjectiveValue()) + return :Optimal, obj * mip.sense + elseif status in [MOI.INFEASIBLE, MOI.INFEASIBLE_OR_UNBOUNDED] + return :Infeasible, Inf * mip.sense + end + error("unknown status: $status") +end + +""" + values(mip::MIP, vars::Vector{Variable})::Array{Float64} + +Returns a vector `vals` which describes the current primal values for the +decision variables `vars`. More specifically, `vals[j]` is the current +primal value of `vars[j]`. +""" +function values(mip::MIP, vars::Vector{Variable})::Array{Float64} + return MOI.get( + mip.optimizers[threadid()], + MOI.VariablePrimal(), + convert.(MOI.VariableIndex, vars), + ) +end + +values(mip::MIP) = + values(mip, MOI.get(mip.optimizers[threadid()], MOI.ListOfVariableIndices())) + +""" + set_bounds!(mip::MIP, + vars::Vector{Variable}, + lb::Array{Float64}, + ub::Array{Float64}) + +Modify the bounds of the given variables. More specifically, sets +upper and lower bounds of `vars[j]` to `ub[j]` and `lb[j]`, respectively. +""" +function set_bounds!( + mip::MIP, + vars::Vector{Variable}, + lb::Array{Float64}, + ub::Array{Float64}, +)::Nothing + t = threadid() + MOI.delete(mip.optimizers[t], _bounds_constraint.(vars)) + funcs = MOI.SingleVariable[] + sets = MOI.Interval[] + for j = 1:length(vars) + push!(funcs, MOI.SingleVariable(vars[j])) + push!(sets, MOI.Interval(lb[j], ub[j])) + end + MOI.add_constraints(mip.optimizers[t], funcs, sets) + return +end + +""" + name(mip::MIP, var::Variable)::String + +Return the name of the decision variable `var`. 
+""" +function name(mip::MIP, var::Variable)::String + t = threadid() + return MOI.get(mip.optimizers[t], MOI.VariableName(), convert(MOI.VariableIndex, var)) +end + +convert(::Type{MOI.VariableIndex}, v::Variable) = MOI.VariableIndex(v.index) + +""" + probe(mip::MIP, var)::Tuple{Float64, Float64} + +Suppose that the LP relaxation of `mip` has been solved and that `var` holds +a fractional value `f`. This function returns two numbers corresponding, +respectively, to the optimal values of the LP relaxations having the +constraints var=1 and var=0 enforced. If any branch is infeasible, the optimal +value for that branch is Inf for minimization problems and -Inf for maximization +problems. +""" +function probe(mip::MIP, var)::Tuple{Float64,Float64} + set_bounds!(mip, [var], [1.0], [1.0]) + status_up, obj_up = solve_relaxation!(mip) + set_bounds!(mip, [var], [0.0], [0.0]) + status_down, obj_down = solve_relaxation!(mip) + set_bounds!(mip, [var], [0.0], [1.0]) + return obj_up * mip.sense, obj_down * mip.sense +end diff --git a/src/bb/nodepool.jl b/src/bb/nodepool.jl new file mode 100644 index 0000000..dae435f --- /dev/null +++ b/src/bb/nodepool.jl @@ -0,0 +1,188 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details.
+ +using Statistics +using DataStructures +import Base.Threads: threadid + +function take( + pool::NodePool; + suggestions::Array{Node} = [], + time_remaining::Float64, + gap_limit::Float64, + node_limit::Int, +)::Union{Symbol,Node} + t = threadid() + lock(pool.lock) do + n_processing = length(pool.processing) + if ( + (pool.gap < gap_limit) || + (n_processing + pool.processed >= node_limit) || + (time_remaining < 0) + ) + return :END + end + if isempty(pool.pending) + if isempty(pool.processing) + return :END + else + return :WAIT + end + else + # If one of the suggested nodes is still pending, return it. + # This is known in the literature as plunging. + for s in suggestions + if s in keys(pool.pending) + delete!(pool.pending, s) + pool.processing[s] = s.obj + return s + end + end + + # If all suggestions have already been processed + # or pruned, find another node based on best bound. + node = dequeue!(pool.pending) + pool.processing[node] = node.obj + return node + end + end +end + +function offer( + pool::NodePool; + parent_node::Union{Nothing,Node}, + child_nodes::Vector{Node}, + time_elapsed::Float64 = 0.0, + print_interval::Int = 100, + detailed_output::Bool = false, +)::Nothing + lock(pool.lock) do + primal_update = false + + # Update node.processing and node.processed + pool.processed += 1 + if parent_node !== nothing + delete!(pool.processing, parent_node) + end + + # Queue child nodes + for node in child_nodes + if node.status == :Infeasible + continue + end + if node.obj >= pool.primal_bound + continue + end + if isempty(node.fractional_variables) + pool.primal_bound = min(pool.primal_bound, node.obj) + primal_update = true + continue + end + pool.pending[node] = node.obj + end + + # Update dual bound + pool.dual_bound = pool.primal_bound + if !isempty(pool.pending) + pool.dual_bound = min(pool.dual_bound, peek(pool.pending)[2]) + end + if !isempty(pool.processing) + pool.dual_bound = min(pool.dual_bound, peek(pool.processing)[2]) + end + + # Update 
gap + if pool.primal_bound == pool.dual_bound + pool.gap = 0 + else + pool.gap = abs((pool.primal_bound - pool.dual_bound) / pool.primal_bound) + end + + if parent_node !== nothing + # Update branching variable history + branch_var = child_nodes[1].branch_variables[end] + offset = findfirst(isequal(branch_var), parent_node.fractional_variables) + x = parent_node.fractional_values[offset] + obj_change_up = child_nodes[1].obj - parent_node.obj + obj_change_down = child_nodes[2].obj - parent_node.obj + _update_var_history( + pool = pool, + var = branch_var, + x = x, + obj_change_down = obj_change_down, + obj_change_up = obj_change_up, + ) + + # Update global history + pool.history.avg_pseudocost_up = + mean(vh.pseudocost_up for vh in values(pool.var_history)) + pool.history.avg_pseudocost_down = + mean(vh.pseudocost_down for vh in values(pool.var_history)) + + # Print progress + print_progress( + pool, + parent_node, + time_elapsed = time_elapsed, + print_interval = print_interval, + detailed_output = detailed_output, + primal_update = primal_update, + ) + end + end + return +end + +function _update_var_history(; + pool::NodePool, + var::Variable, + x::Float64, + obj_change_down::Float64, + obj_change_up::Float64, +)::Nothing + # Create new history entry + if var ∉ keys(pool.var_history) + pool.var_history[var] = VariableHistory() + end + varhist = pool.var_history[var] + + # Push fractional value + push!(varhist.fractional_values, x) + + # Push objective value changes + push!(varhist.obj_change_up, obj_change_up) + push!(varhist.obj_change_down, obj_change_down) + + # Push objective change ratios + f_up = x - floor(x) + f_down = ceil(x) - x + if isfinite(obj_change_up) + push!(varhist.obj_ratio_up, obj_change_up / f_up) + end + if isfinite(obj_change_down) + push!(varhist.obj_ratio_down, obj_change_down / f_down) + end + + # Update variable pseudocosts + varhist.pseudocost_up = 0.0 + varhist.pseudocost_down = 0.0 + if !isempty(varhist.obj_ratio_up) + 
varhist.pseudocost_up = sum(varhist.obj_ratio_up) / length(varhist.obj_ratio_up) + end + if !isempty(varhist.obj_ratio_down) + varhist.pseudocost_down = + sum(varhist.obj_ratio_down) / length(varhist.obj_ratio_down) + end + return +end + +function generate_indices(pool::NodePool, n::Int)::Vector{Int} + lock(pool.lock) do + result = Int[] + for i = 1:n + push!(result, pool.next_index) + pool.next_index += 1 + end + return result + end +end diff --git a/src/bb/optimize.jl b/src/bb/optimize.jl new file mode 100644 index 0000000..ed5cbd2 --- /dev/null +++ b/src/bb/optimize.jl @@ -0,0 +1,125 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +using Printf +using Base.Threads +import Base.Threads: @threads, nthreads, threadid + +function solve!( + mip::MIP; + time_limit::Float64 = Inf, + node_limit::Int = typemax(Int), + gap_limit::Float64 = 1e-4, + print_interval::Int = 5, + initial_primal_bound::Float64 = Inf, + detailed_output::Bool = false, + branch_rule::VariableBranchingRule = HybridBranching(), +)::NodePool + time_initial = time() + pool = NodePool(mip = mip) + pool.primal_bound = initial_primal_bound + print_progress_header(detailed_output = detailed_output) + + root_node = _create_node(mip) + if isempty(root_node.fractional_variables) + println("root relaxation is integer feasible") + pool.dual_bound = root_node.obj + pool.primal_bound = root_node.obj + return pool + end + + offer(pool, parent_node = nothing, child_nodes = [root_node]) + @threads for t = 1:nthreads() + child_one, child_zero, suggestions = nothing, nothing, Node[] + while true + time_elapsed = time() - time_initial + if child_one !== nothing + suggestions = Node[child_one, child_zero] + end + node = take( + pool, + suggestions = suggestions, + time_remaining = time_limit - time_elapsed, + node_limit = node_limit, + 
gap_limit = gap_limit, + ) + if node == :END + break + elseif node == :WAIT + sleep(0.1) + continue + else + ids = generate_indices(pool, 2) + branch_variable = find_branching_var(branch_rule, node, pool) + child_zero = _create_node( + mip, + index = ids[1], + parent = node, + branch_variable = branch_variable, + branch_value = 0.0, + ) + child_one = _create_node( + mip, + index = ids[2], + parent = node, + branch_variable = branch_variable, + branch_value = 1.0, + ) + offer( + pool, + parent_node = node, + child_nodes = [child_one, child_zero], + time_elapsed = time_elapsed, + print_interval = print_interval, + detailed_output = detailed_output, + ) + end + end + end + return pool +end + +function _create_node( + mip; + index::Int = 0, + parent::Union{Nothing,Node} = nothing, + branch_variable::Union{Nothing,Variable} = nothing, + branch_value::Union{Nothing,Float64} = nothing, +)::Node + if parent === nothing + branch_variables = Variable[] + branch_values = Float64[] + depth = 1 + else + branch_variables = [parent.branch_variables; branch_variable] + branch_values = [parent.branch_values; branch_value] + depth = parent.depth + 1 + end + set_bounds!(mip, branch_variables, branch_values, branch_values) + status, obj = solve_relaxation!(mip) + if status == :Optimal + vals = values(mip, mip.binary_variables) + fractional_indices = + [j for j in 1:length(mip.binary_variables) if 1e-6 < vals[j] < 1 - 1e-6] + fractional_values = vals[fractional_indices] + fractional_variables = mip.binary_variables[fractional_indices] + else + fractional_variables = Variable[] + fractional_values = Float64[] + end + n_branch = length(branch_variables) + set_bounds!(mip, branch_variables, zeros(n_branch), ones(n_branch)) + return Node( + mip, + index, + depth, + obj, + status, + branch_variables, + branch_values, + fractional_variables, + fractional_values, + parent, + ) +end diff --git a/src/bb/structs.jl b/src/bb/structs.jl new file mode 100644 index 0000000..36cc158 --- /dev/null +++ 
b/src/bb/structs.jl @@ -0,0 +1,70 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +using DataStructures +abstract type VariableBranchingRule end + +struct Variable + index::Any +end + +mutable struct MIP + constructor + optimizers::Vector + binary_variables::Vector{Variable} + sense::Float64 + lp_iterations::Int64 +end + +struct Node + mip::MIP + index::Int + depth::Int + obj::Float64 + status::Symbol + branch_variables::Array{Variable} + branch_values::Array{Float64} + fractional_variables::Array{Variable} + fractional_values::Array{Float64} + parent::Union{Nothing,Node} +end + +Base.@kwdef mutable struct History + avg_pseudocost_up::Float64 = 1.0 + avg_pseudocost_down::Float64 = 1.0 +end + +mutable struct VariableHistory + fractional_values::Array{Float64} + obj_change_up::Array{Float64} + obj_change_down::Array{Float64} + obj_ratio_up::Array{Float64} + obj_ratio_down::Array{Float64} + pseudocost_up::Float64 + pseudocost_down::Float64 + + VariableHistory() = new( + Float64[], # fractional_values + Float64[], # obj_change_up + Float64[], # obj_change_down + Float64[], # obj_ratio_up + Float64[], # obj_ratio_down + 0.0, # pseudocost_up + 0.0, # pseudocost_down + ) +end + +Base.@kwdef mutable struct NodePool + mip::MIP + pending::PriorityQueue{Node,Float64} = PriorityQueue{Node,Float64}() + processing::PriorityQueue{Node,Float64} = PriorityQueue{Node,Float64}() + processed::Int = 0 + next_index::Int = 1 + lock::ReentrantLock = ReentrantLock() + primal_bound::Float64 = Inf + dual_bound::Float64 = Inf + gap::Float64 = Inf + history::History = History() + var_history::Dict{Variable,VariableHistory} = Dict() +end diff --git a/src/bb/varbranch/hybrid.jl b/src/bb/varbranch/hybrid.jl new file mode 100644 index 0000000..fa5d959 --- /dev/null +++ b/src/bb/varbranch/hybrid.jl @@ -0,0 +1,31 @@ +#
MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +""" + HybridBranching(depth_cutoff::Int, + shallow_rule::VariableBranchingRule, + deep_rule::VariableBranchingRule) + +Branching strategy that switches between two variable branching strategies, +according to the depth of the node. + +More specifically, if `node.depth <= depth_cutoff`, then `shallow_rule` is +applied. Otherwise, `deep_rule` is applied. +""" +mutable struct HybridBranching <: VariableBranchingRule + depth_cutoff::Int + shallow_rule::VariableBranchingRule + deep_rule::VariableBranchingRule +end + +HybridBranching(depth_cutoff::Int = 10) = + HybridBranching(depth_cutoff, StrongBranching(), PseudocostBranching()) + +function find_branching_var(rule::HybridBranching, node::Node, pool::NodePool)::Variable + if node.depth <= rule.depth_cutoff + return find_branching_var(rule.shallow_rule, node, pool) + else + return find_branching_var(rule.deep_rule, node, pool) + end +end diff --git a/src/bb/varbranch/infeasibility.jl b/src/bb/varbranch/infeasibility.jl new file mode 100644 index 0000000..2c99ed8 --- /dev/null +++ b/src/bb/varbranch/infeasibility.jl @@ -0,0 +1,54 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +""" + FirstInfeasibleBranching() + +Branching rule that always selects the first fractional variable.
+""" +struct FirstInfeasibleBranching <: VariableBranchingRule end + +function find_branching_var( + rule::FirstInfeasibleBranching, + node::Node, + pool::NodePool, +)::Variable + return node.fractional_variables[1] +end + +""" + LeastInfeasibleBranching() + +Branching strategy that selects the fractional variable whose value is the closest +to an integral value. +""" +struct LeastInfeasibleBranching <: VariableBranchingRule end + +function find_branching_var( + rule::LeastInfeasibleBranching, + node::Node, + pool::NodePool, +)::Variable + scores = [max(v - floor(v), ceil(v) - v) for v in node.fractional_values] + _, max_offset = findmax(scores) + return node.fractional_variables[max_offset] +end + +""" + MostInfeasibleBranching() + +Branching strategy that selects the fractional variable whose value is closest +to 1/2. +""" +struct MostInfeasibleBranching <: VariableBranchingRule end + +function find_branching_var( + rule::MostInfeasibleBranching, + node::Node, + pool::NodePool, +)::Variable + scores = [min(v - floor(v), ceil(v) - v) for v in node.fractional_values] + _, max_offset = findmax(scores) + return node.fractional_variables[max_offset] +end diff --git a/src/bb/varbranch/pseudocost.jl b/src/bb/varbranch/pseudocost.jl new file mode 100644 index 0000000..8b3cff0 --- /dev/null +++ b/src/bb/varbranch/pseudocost.jl @@ -0,0 +1,45 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +""" + PseudocostBranching() + +Branching strategy that uses historical changes in objective value to estimate +strong branching scores at lower computational cost.
+""" +struct PseudocostBranching <: VariableBranchingRule end + +function find_branching_var(rule::PseudocostBranching, node::Node, pool::NodePool)::Variable + scores = [ + _pseudocost_score( + node, + pool, + node.fractional_variables[j], + node.fractional_values[j], + ) for j = 1:length(node.fractional_variables) + ] + _, max_offset = findmax(scores) + return node.fractional_variables[max_offset] +end + +function _pseudocost_score( + node::Node, + pool::NodePool, + var::Variable, + x::Float64, +)::Tuple{Float64,Int} + f_up = x - floor(x) + f_down = ceil(x) - x + pseudo_up = pool.history.avg_pseudocost_up * f_up + pseudo_down = pool.history.avg_pseudocost_down * f_down + if var in keys(pool.var_history) + if isfinite(pool.var_history[var].pseudocost_up) + pseudo_up = pool.var_history[var].pseudocost_up * f_up + end + if isfinite(pool.var_history[var].pseudocost_down) + pseudo_down = pool.var_history[var].pseudocost_down * f_down + end + end + return (pseudo_up * f_up * pseudo_down * f_down, var.index) +end diff --git a/src/bb/varbranch/random.jl b/src/bb/varbranch/random.jl new file mode 100644 index 0000000..fd09c30 --- /dev/null +++ b/src/bb/varbranch/random.jl @@ -0,0 +1,17 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +using Random + + +""" + RandomBranching() + +Branching strategy that picks a fractional variable randomly. 
+""" +struct RandomBranching <: VariableBranchingRule end + +function find_branching_var(rule::RandomBranching, node::Node, pool::NodePool)::Variable + return shuffle(node.fractional_variables)[1] +end diff --git a/src/bb/varbranch/reliability.jl b/src/bb/varbranch/reliability.jl new file mode 100644 index 0000000..a664459 --- /dev/null +++ b/src/bb/varbranch/reliability.jl @@ -0,0 +1,78 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +""" + ReliabilityBranching + +Branching strategy that uses pseudocosts if there are enough observations +to make an accurate prediction of strong branching scores, or runs the +actual strong branching routine if not enough observations are available. +""" +Base.@kwdef mutable struct ReliabilityBranching <: VariableBranchingRule + min_samples::Int = 8 + max_sb_calls::Int = 100 + look_ahead::Int = 10 + n_sb_calls::Int = 0 + side_effect::Bool = true +end + +function find_branching_var( + rule::ReliabilityBranching, + node::Node, + pool::NodePool, +)::Variable + nfrac = length(node.fractional_variables) + pseudocost_scores = [ + _pseudocost_score( + node, + pool, + node.fractional_variables[j], + node.fractional_values[j], + ) for j = 1:nfrac + ] + σ = sortperm(pseudocost_scores, rev = true) + sorted_vars = node.fractional_variables[σ] + _strong_branch_start(node) + no_improv_count, n_sb_calls = 0, 0 + max_score, max_var = pseudocost_scores[σ[1]], sorted_vars[1] + for (i, var) in enumerate(sorted_vars) + use_strong_branch = true + if n_sb_calls >= rule.max_sb_calls + use_strong_branch = false + else + if var in keys(pool.var_history) + varhist = pool.var_history[var] + hlength = min( + length(varhist.obj_ratio_up), + length(varhist.obj_ratio_down), + ) + if hlength >= rule.min_samples + use_strong_branch = false + end + end + end + if use_strong_branch + 
n_sb_calls += 1 + rule.n_sb_calls += 1 + score = _strong_branch_score( + node = node, + pool = pool, + var = var, + x = node.fractional_values[σ[i]], + side_effect = rule.side_effect, + ) + else + score = pseudocost_scores[σ[i]] + end + if score > max_score + max_score, max_var = score, var + no_improv_count = 0 + else + no_improv_count += 1 + end + no_improv_count <= rule.look_ahead || break + end + _strong_branch_end(node) + return max_var +end diff --git a/src/bb/varbranch/strong.jl b/src/bb/varbranch/strong.jl new file mode 100644 index 0000000..f4fae31 --- /dev/null +++ b/src/bb/varbranch/strong.jl @@ -0,0 +1,93 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +using Random + +""" + StrongBranching(look_ahead::Int, max_calls::Int) + +Branching strategy that selects a subset of fractional variables +as candidates (according to pseudocosts) then solves two linear +programming problems for each candidate.
+ + +""" +Base.@kwdef struct StrongBranching <: VariableBranchingRule + look_ahead::Int = 10 + max_calls::Int = 100 + side_effect::Bool = true +end + +function find_branching_var(rule::StrongBranching, node::Node, pool::NodePool)::Variable + nfrac = length(node.fractional_variables) + pseudocost_scores = [ + _pseudocost_score( + node, + pool, + node.fractional_variables[j], + node.fractional_values[j], + ) for j = 1:nfrac + ] + σ = sortperm(pseudocost_scores, rev = true) + sorted_vars = node.fractional_variables[σ] + _strong_branch_start(node) + no_improv_count, call_count = 0, 0 + max_score, max_var = pseudocost_scores[σ[1]], sorted_vars[1] + for (i, var) in enumerate(sorted_vars) + call_count += 1 + score = _strong_branch_score( + node = node, + pool = pool, + var = var, + x = node.fractional_values[σ[i]], + side_effect = rule.side_effect, + ) + if score > max_score + max_score, max_var = score, var + no_improv_count = 0 + else + no_improv_count += 1 + end + no_improv_count <= rule.look_ahead || break + call_count <= rule.max_calls || break + end + _strong_branch_end(node) + return max_var +end + +function _strong_branch_score(; + node::Node, + pool::NodePool, + var::Variable, + x::Float64, + side_effect::Bool, +)::Tuple{Float64,Int} + obj_up, obj_down = 0, 0 + try + obj_up, obj_down = probe(node.mip, var) + catch + @warn "strong branch error" var=var + end + obj_change_up = obj_up - node.obj + obj_change_down = obj_down - node.obj + if side_effect + _update_var_history( + pool = pool, + var = var, + x = x, + obj_change_down = obj_change_down, + obj_change_up = obj_change_up, + ) + end + return (obj_change_up * obj_change_down, var.index) +end + +function _strong_branch_start(node::Node) + set_bounds!(node.mip, node.branch_variables, node.branch_values, node.branch_values) +end + +function _strong_branch_end(node::Node) + nbranch = length(node.branch_variables) + set_bounds!(node.mip, node.branch_variables, zeros(nbranch), ones(nbranch)) +end diff --git 
a/test/bb/lp_test.jl b/test/bb/lp_test.jl new file mode 100644 index 0000000..304e11b --- /dev/null +++ b/test/bb/lp_test.jl @@ -0,0 +1,115 @@ +# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization +# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved. +# Released under the modified BSD license. See COPYING.md for more details. + +using Clp +using JuMP +using Test +using MIPLearn.BB + +basepath = @__DIR__ + +function runtests(optimizer_name, optimizer; large = true) + @testset "Solve ($optimizer_name)" begin + @testset "interface" begin + filename = "$basepath/../fixtures/danoint.mps.gz" + + mip = BB.init(optimizer) + BB.read!(mip, filename) + + @test mip.sense == 1.0 + @test length(mip.binary_variables) == 56 + + status, obj = BB.solve_relaxation!(mip) + @test status == :Optimal + @test round(obj, digits = 6) == 62.637280 + + @test BB.name(mip, mip.binary_variables[1]) == "xab" + @test BB.name(mip, mip.binary_variables[2]) == "xac" + @test BB.name(mip, mip.binary_variables[3]) == "xad" + + vals = BB.values(mip, mip.binary_variables) + @test round(vals[1], digits = 6) == 0.046933 + @test round(vals[2], digits = 6) == 0.000841 + @test round(vals[3], digits = 6) == 0.248696 + + # Probe (up and down are feasible) + probe_up, probe_down = BB.probe(mip, mip.binary_variables[1]) + @test round(probe_down, digits = 6) == 62.690000 + @test round(probe_up, digits = 6) == 62.714100 + + # Fix one variable to zero + BB.set_bounds!(mip, mip.binary_variables[1:1], [0.0], [0.0]) + status, obj = BB.solve_relaxation!(mip) + @test status == :Optimal + @test round(obj, digits = 6) == 62.690000 + + # Fix one variable to one and another variable to zero + BB.set_bounds!(mip, mip.binary_variables[1:2], [1.0, 0.0], [1.0, 0.0]) + status, obj = BB.solve_relaxation!(mip) + @test status == :Optimal + @test round(obj, digits = 6) == 62.714777 + + # Probe (up is infeasible, down is feasible) + BB.set_bounds!( + mip, + mip.binary_variables[1:3], + 
[1.0, 1.0, 0.0], + [1.0, 1.0, 1.0], + ) + status, obj = BB.solve_relaxation!(mip) + @test status == :Optimal + probe_up, probe_down = BB.probe(mip, mip.binary_variables[3]) + @test round(probe_up, digits = 6) == Inf + @test round(probe_down, digits = 6) == 63.073992 + + # Fix all binary variables to one, making problem infeasible + N = length(mip.binary_variables) + BB.set_bounds!(mip, mip.binary_variables, ones(N), ones(N)) + status, obj = BB.solve_relaxation!(mip) + @test status == :Infeasible + @test obj == Inf + + # Restore original problem + N = length(mip.binary_variables) + BB.set_bounds!(mip, mip.binary_variables, zeros(N), ones(N)) + status, obj = BB.solve_relaxation!(mip) + @test status == :Optimal + @test round(obj, digits = 6) == 62.637280 + end + + @testset "varbranch" begin + branch_rules = [ + BB.RandomBranching(), + BB.FirstInfeasibleBranching(), + BB.LeastInfeasibleBranching(), + BB.MostInfeasibleBranching(), + BB.PseudocostBranching(), + BB.StrongBranching(), + BB.ReliabilityBranching(), + BB.HybridBranching(), + ] + for branch_rule in branch_rules + filename = "$basepath/../fixtures/vpm2.mps.gz" + mip = BB.init(optimizer) + BB.read!(mip, filename) + @info optimizer_name, branch_rule + @time BB.solve!( + mip, + initial_primal_bound = 13.75, + print_interval = 10, + node_limit = 100, + branch_rule = branch_rule, + ) + end + end + end +end + +@testset "BB" begin + @time runtests("Clp", Clp.Optimizer) + if is_gurobi_available + using Gurobi + @time runtests("Gurobi", Gurobi.Optimizer) + end +end diff --git a/test/fixtures/danoint.mps.gz b/test/fixtures/danoint.mps.gz new file mode 100644 index 0000000000000000000000000000000000000000..1222df58a24d5574033a1b4cb4d02a03d6ebd2af GIT binary patch literal 14267 zcmb_?dpuNY+rLAlB1$>5l~c9L9*2!xv7M8xQdG(?NKJ-{C{s?uAiLO9*oAs9qk~jp zGE5q$sdh@lNSMZHlylC#5~ksEQw%|_78Br|3vc}!0x zr$kV|9UkNJ6_})N!;{@{jN(UT#2~_0KhMtBbY`kRg;P&%F(b@&4WxXV8fj@7y;sy! 
z)a1!zd>au&bqbou9OK!k*-jRhLNfWrLr<~)vh!t>G6)y>(hw5xoZnMpDF#%oGuhG`DHQu#bnRLLZn z5lUUY@%wmVzMnsi3Vc70se}>Fe+%HLWPBYR8T0)nNBq`aVD^n4W=fcCp)h7=KFV-H zr+NpLrlh)iz0qOJOm$iIzTth$?7VYc&7^IbGB(6WW7&O<>zpAtHgHVIVc&+V1O*l$ z&s@dAr&>}9WDdBJ&Z67cqaIGp$;@88z#zrR$@VDyWvFG+;&B!)RI%rd{>p00+}XYK zlAL%{#9P(lt9AD%cF4~RkvSx^)P`lsk>M7vRZqmLIWiTG_sZE&9D~bhQAx*g^Z%-q zwtIQvbX3O1B!9=o6Q|=cOp+!8S1Q{OGzW6?-`0xSNN8?+df|ora>rkf_l9R|O1kd& zE4DW}V-soD>)JIoTQ$?3UU*fz0{Sw%PBy99@z;~RS2OgHFD`bU`9nk>?u5L7l<_jr z$mo|xX;KzNlme0OCXB~Gdk3zRwlUHicnTRL+}nQGvGF8qkTkNlIBC}l#Gu>@(B9UX z8%NfuLk6Ab{XJuCk}dotGyds?H?`|+JT=pD;V(nn36ApYN=9fNhec@nd026Yl=?Ds zz1|XjXolfVU@IdRvS5STLY~uDRdp~`s>BX)jvlo-p@KC?Q6Q=9R7PAgU3L3 z{%-N6tslDa1a#wODZOQ$Q9D)O^o8V5s^C+xGKv#obb}%hpHYUqH^B_m{ZP0+9T`2ODyH6z13V zKE}MBKNb{~P2k?|{@MUp3p*CJAR4k(SjV~s)|-#QbsY^S%=N&r6LXmjEGS+aloj*G zf|AP?x)-=WXzi;-7OaJ>or_p19I-ZCBx2pa-;Y6pvPTg(x_5Cm+`nc+PRPuoWq1a+TapkCaG?Y{hnWDRjF--XWfTOKj*aaafe2VC3&FBh)J_co zOYwQIOu8Pp5(2F6U>uqp zP&Jv<|AKmNi?g{7n;l<5+B^A^-QIUenFaZ8FGaCO$CD;|hJSV@wbm-@DC3NRoRTM( zkN%wEs_RL|UWY$PuDZT-dy^vjlFpbQ|FUBE^HOKbQvb3`owlp`ZC53sN0MDly+1QjRt52XiD2*CQq=4kNs3i^I#Ikqc31!7?!YC4V?<)(fGR68 zsid3rv4CXhf1I7mEJb(OVq+S|X3aUP*bsM|S;@33is%Whp>k(}r~O;w2g=gCe5u!9{cEXxcJu+g*b^~gLL;OVvV4sJz^V`k;vXF)ktwnN-ylE%Ex{4 z+0lUuQq1$+x8sym13pI%T@!<T)!>vhT;)I zt4dEomI)_5`Doz#QjKy86qSU}etyZp<&lF6p=9zdNSkL4inY%(n8Zi!5+F|W_y|3t zKP+h7)_Aavp5bP3wpk35Fp=t2CYS$ZAMHzHA5Fn^@-&zKYIRQ#g>@77N-k7eIs^sp zNBpIram*Dp8T;`lDkojeiAi^Gu|VY>onnsfnXPD=;(2(UaOUO}o3PFGiYn zw{IzKASkTO)`L&1i7oNUH3NHN4k26Ov$(v{R&~;N@`Stu982Mm%Nt3S;NXYQ1lu}g zu<~GU&{OwJ;=#V4uym#ap)^cd?#CEPMG)fN4-pO*j-Y;}KI=Y+Dm+$KPg+ZhVwbAa z1^bj=7o9uJv?WZ*nLMC$r|PS zyP22|++8_L(*e#kP*jb0HF2E(>c}(9RqVa*T|q$vvT-1@hwvx%XK@XmbW%xUG#42! 
zIw_}fns|o>*17#~S?#PA+MNS}0O0`D-t6H->XS_b$I8U(DFWY9Lt|N9^u-&Vp+Flx zxU4pi7e*G0I(QIttv+;*hQEiv6QAqC#>vpFPL;jVLSjCiE@x67Q_AuRojm>RU%FBe zByR8kw5Y6gOjWwMvVZe~hb9V@iLTE;HIXH)&wS`!ykBrOv>)RNToo-9-a63zvPV3u zlhVqXb&9+bo&B>vrG#cVfSsTsT(lwf*Y6ub6`!tWMD5?)HJE_pN%2Qq$(C@3o5i8` z<-|{wnbWYU7~OOSsS-`XMBs~9aju?GxrVaLYg2xyHL;Pw3)^lrf{7Ln_2=DX)iYcR zmOBT3&=N)BA)1d($LNj+4?F`%`8xS{>KO}d4J1~BAg6kxV z0b^H?I#fEYwX=rST%(gei6>81=^WqFK4!}AMBU!#=)VrnEw|(U_yZasBq%A!OQs4O z>qjBd2*QbAbPr}L2AB9Z0U&U-IKF(mnB_%r2WS-&XOjfr7=V@J8W2lDHOWGvCgw@E z3u;+SOvIV88k^iEZGOIQvY2IhcgZormb?;xzP1g&=o+mIw<5<<1wmQORQ~g6U)!T9 zXp!vl;A$FHBwO(zaO=9F0jeF}R1bzL8d?v=EB6Cy^X`c}=NkV(4TY0{>$Wdz+W3;g z%?*C{vf&_2?pmlxa3fgoY{ds ztuIEz&f8W?kX@H#D?W0#x|?EE7b8e-z^kc)@&Nw0(apI9R?@n6XZ}_bGfFtY4{Z$# zrM3uB>T0^&ruvwK9$GX;S8oB)+!)e3;V%o;uTA^G0yRpK%yz66BKVqo?0t-`Y>E6K z)Nc^%k3LzJ7kTUMWNM)F&q&;Z@+%$aI5+eo|+TQ zPN+)T%`Vb*9^lQg8Q-pi3Hr(AsIMvauI6LKvTr?f*uR^yr7q@CGOjb|p|NV7rP;%6 z{oAOC^c&^cTmZ#oS=vS3YcEdthIy#%u-nGeltoZ`?WwZeG^~2C)*XlaC7M|#3KH)A zJrXq1jm+{}X}kRgf`$@9>LG~Yx%68dnlg@n!BjWGVgC`-k9S$JXRHi?!fEZJIp5wm zZIRvGp`quoH31;o#)l>n+1rYq^~vW}S%1)4fmGm%eC%C}?wXRMgQyCN%2~;p`Gweb z2Zaj*KLi};3v%-94LIV1z^u>3mHaYKedgK_L*(wIp%kuQcX5JqeGh=$RvV4TnY zr9~()mSA)bW#1}qJhm%)8^nL_^iRE?^aGMGwdctD>*__PtP)x_sf^MCmzB8XN?{I7 zPVPZ@)WqCAQ?@@J`&;&{%MS_0ZDkP(dPd`sgIwQ3)(HZP$5z9h5pxjyG3}!0FAQpH z?a=O{EN49=8v3Wo_S3LG_QkBHVFAn+n+@;sfOU{#&Q11SU@}QXjRi4w?mKh|6VXq% zEKnYg<@BAD+UmVN7y!+Vfw z_Z;3ssb&>)94Wk`d*9naXPjA1ZyXcgAAY)?6Cz!Opy2y-IsN*W+fPs)buqV3mzfq# zZlK+Tki}t(uF*S-%GR+>9Y2wWFu9x-%9%1Lq!6Sqy7t+({y0{-(7aPaL96kMctmii zp@+soT@QJuI+Chzj+Ho*ZZG=!20wk*$|`rQo$VMO4=;M?WTi6iG$&AS=HP%Kuhq!I z8YCGwSP7HMydyBZ*@^EdRcWt!NxW)S%Ij+ z>bGaadWjlzu}mzRBx(C|F(NT(ql~Y4m6B~sU)VEfPfWS(N(WyzSNNQI5x=ZMoOrf; z;@W3Bzai%~w7Ka^3i4B;fRmi&wgQ;Gb*er*tE9Q1RVUgoAUX7FfVZ~D z+nGYoZG2_%x9dfur!LyDmp$5OJkhkL4{tY|m8Yt*ZRmnRA|)-S^-|^GD?=xp=!L-iq-uHv=qlii1wG6L(q+uHoHC49Hhi z8TPWwVYu{c<}49e=&@1hJGgH2C1bVG6~yhryOhCQWautnm_tYHZ-qTNkJkdjt}QUk z7rPDd*pR-+b$yWy!+j*`s>4QsU)^NxFipzlqPDY13-W~Y0#9UI+{!z=3W{wm^@O?U 
z!PZsuQJ+E_x^+pAd54`t>$^*EbZ4h&H$voAEb^Fh!*`Dz?Wi4m)-yD<4j6V|p<%8X zuwkc$v`=sF6ftOHnc$S}`w!PKR{lmN#JH_0mzZgc@6%ao@-CoWgdz6*+{Ju6o0q7{ zjSjgqcNb;Cc*1u|v_mvufn!VXkYkl$$37FfEPZq{Cw2+qGIU36&eI;sK6zkS)k4c0 ze-gS(Lu|3j`1cpMEC8kRcbC!LZRJrl7nMW7CipI3*nEnWz1_--(Ov9VVms{EU65nR zrTvUznQ~rbhfLm1BJRhWY3X0iuH5HlLk|7QdEg-O^?e=AOY`XMS7PxcNu@Zi*(a2h zHhVj;gQ7OMAIIc7DwMj5ysOAZ9spV~x zI!g1)LWi)VL2X%zULDnX2Ez$>c)&tHQFq>XUMudELtP@?OFiJjwcKlItI88|F0wOc zFJ52Do6@nwCwfv`g8lW_@Kk=D;%GDs z8L56QVja!oviPk??cQwHQ|+GBykfj*bUiV9R55INj&tL>jg+-^@(SuA(idn3_yFPkbV$vqa2`%RSpV8Qya2R!6dNIkN zpV3{KMyZ5$^p4AU005!bVfvoZ7C3VrO5l*s*D4%x_0c~PSBZ~Cj6Or$Kz>qe7J43p z-7!8%l5tq!WED8pZ7By9eEt{Y&VSK&gTAxOQ>U~PP5Heb?4w_`s|CN9F| z7W0Ynfej^!%b4eRBvD^I%&SzM&fD#3$X&w7k?%Mn$O%vNeNq-ri4NHnq!f>06;+nb z;Eq>Q|DqQ#q`YH}i|#ShHus{N#nN^h_ky$qK!qhRDhO@3*)EHq0#c-z8c6#Btwsxq8~JE?Ug#0oC59G4+&{wvCR zCw~L(8;bWzU(r4Fl&A*3FnKTF5mToi>-lWly|60n7GcN#&CUjJ!P6Q}di~es`Q7VR zyoK-}AC?3Xlk^1^H<%p_*mATmgcE~QotcVMTom4SrsBb{d^lYBj$S6u{1Kj=D-J}u z6hIJYy?fTZGffLnHGJ{^{J@BCf*?o84Bvrq0czSosLB7R;3ja2N${i(qgt}EJ-{K9 zFJ+g)j1XZ~xf6eRw!ZyZ29$B(8H!L z3fIGrtC(?u$ivktXRw7f(q}F(RsLbjtI1`5fzz2jaT_wIb1Gf&y~#^T)>1ZCm*&ne`EEd-pfs=kTGW5X%m(NPyOAtW!_Mb&usi@pKw#|m zn7C23Tggio(yQBi#r-;M2dFw>>u=@tpBo&|5l>Oah#bBii?I8(PmPdb25=w(!@&wz ziGfE-0UV75Q4#WJq4!LMz^Rm%XJ9Ghalo^47J%LWA_E~h01B>rgOQ?JBByJ9NnUjsK4-1?10*nd9H2Ycxf_DEKcIx_wc< zBty@W{kd6*LDjf4rz)U0EU1q-SXDUBsS15{@@LjpW^J0b62mVn$#+(>Iv%KoBbe)u z>X5zI*=+w^XSl5qwrKU#TzShuM4zDo&Mc%=tEY3cs_dxe%lg3=bKUgo0TJa^kppu%>^JvOt+{Yx_r%8EO@yie=8 zA>%4dqhKq)@Ej&&qJfnW#7^KW2unaW^qzbf5v;&ykTUEy7=#b#=d(5dSi&c5tj->| z+Xx6Yo}ZUagDVqJK@d);45P+85%Pkd1Y}tka~Gm@rc(@d5<9`pGft_H!s6`r>0z#{ zasgdL7jxzDoIpK@L}9e3Na5wJ2EAlK0SrjsDVLfz!KB^&CflzAA+>@@t%q;UFAE1N zvNuB8o`(Bw1PEg_2#aCYWbN7q2(U%FV&=(l_v>KfdVO+lr*<8^Pr zJsXdI5^AJZjLIRM*qC62D9=1-EvQtW;UpxHQl0rpMB5s)@u#wGK_Di?gFhR7L!5zf zmhlIQc1!tM1jXjzWKQ|HxkK=|62K=S`6cgFYZH3rQ`gqeXDO@zbZwuBq8wp!}SnNaD#Ld*noz7cl|IY6- z`nVf(aVMR7M3Tt7fPvRz_w>T(Yx(}FHK+2#IW?MueEqDUl9$|lSQsx2K 
zixtXVLE>#oU{_ZRw8mixzU&6?A`Pk1gRKyDfZn16mh%6=;YwaYXjo9i&ulKX8;HXw zCkbg=Z$lnd>|Cd1-T0KsHIz)YRDjFSO~D>Ub^T)Js_eqU}r#>4Z$v` zTYVrTKpQA6#1^1LB>r8Al;ZvaqOB*clA{*}u-;H2>zm$Mf?hQxwl~RafY(s#31oq{ z_A;2}=bH>6%~o?%BR2-M$4cc`L2-C5kYcteegCDOQ@^nwF& zfmQZ~(hNedpmPOHHY82g!Q)W4BNpm%r2iH&KHwOrB*Cyy*g8X`ER@Em{+fl2b^ zlmO*Q!(Aqlntq{0RS+4+!DI}G7b0k%-5kdlFgbx5RAnu=l=@`P-qV4^@fjUl`HiK9 zQ2i*`>c*r)MKFm|#-Dc9H6Wx>~o% zbRFZq6$9uh+oAk7fg%bCl&z9}mp2vyO}#Q4j1dRdH`-4@W(da{d%g6Z`NLB;t~Sj6 zkA#SN`1FZ7Hcmz`Pr@2+vylNNkU?O4V%TXQ0e2fh383*d2oWR*6WbbUi@=y7u1*$W ziWq_^YYLM8X)?a&dXE0j`x8_5N*tgDi^hJFISb-5Y`AP zcwm(Ck45so=wkn4VN*4f6{$eDME*5e4j?xXK<=0qJS-FsbMO&n@3Q}niT|OS{y($h zZ&d_$yD+YGR)MY^8V)0x>7RQAsM!|ckM~Kfe`9KB{yOp^9tnpnevd;-B@?5JwCe+zVP_pcd4@2ehEj;M!Ma z$GB&wM~))YlHicp`Tri}|6RA(e%CEq|IHf>lT0z<0l9Qom^8Q0RD?qi^6A+p{}rUn zw=2jD+i!N(5o+F`St%U9QZqGZz7N41_&)>qZ^|${lomY){#9UNImU1x3c49FW;U`l zxEL8l6SP|v*oiX9!c;vY}QjZ(RjwHj>6?|T3l47zNS#zlKN@5 zJ{Dh%kipJ@zchXD=1?d41n~o>{ZFx{Cy3?JzZgC976hBQTmLH!^Nd)cLS0)`t>w&U zm0>%1F!+4F&1KG*3O|lRJVH$LRgo#@)H>hR3R}IOp<**Z=^q^pJr@%m33%vJ1_oFigRmxtab;DBg?DqSAS5Zp<$Bj ztLgru^ng!-ojMowV{%R6SW?l&p@7HD9uB?q@gF!kuQzAD*Gf}1@}rkss>Sn6KBpyg zDm4Gn@#RW$chV#_qr0}tfm)_f0zTxLihfLPU>u^ZFDRGU5y~P4^1G&oJ_!a`T+X0G zDtK|0khD>goq*eF$Oci+>TqQp9zI)}zCYf^kKiRgVR-*Z_eOng3 z1`b{Z_oZV=Est9LGK1${0|zgI!>@sZ&&*|&X+I#Zfq$BR4II1@p7w)Qz!~rwICvTS z`)lCfd%ziL^P@lh{u(&;cFQB)udr)JYQ?LZOSBl3 z%x+Yz%vN7@zfsjjvO~h{Y<$MjQOl=#dx)wH(7lX%>8q&u=EQ^WUuM6cB0Hiy;F~I? 
z;gx3ly`YB~DQ*K-yx}X(YvG4Q`9gpBp_bs{XH;|Gr)*b)QarT}GI=_GGidaN<5cUALE;>+c=jJ5$(>vop5&{Is`zJwZPknvh_rCL;^FKFtX72oE?#%Db ziTd@mYgcOSG+(hQ0PAD8A}!Iy7mU0So*Y}0s%|E;8wMM1MO0>#*odH(`@P>3K>;2Tmn>nA8@2KvtD&%SR-hzuES6V(lm3VEWSx!zWu zLhNwT~$y$3$U(ptrT6I@!SD z-F(noN6lbzo7;RIey#&2Xh;amw3scRhurZFo~};d1r!m3LfA9BLZ(PKlX+5|_c%oG zw#s}SSAZWYL(*Fo7D5(2X2EJ_m1cO&n`2 zi;YyfX|c-`Yr59d()3JM@q^3mE6ZfA%C46;*@WHE?o^z0`BZ4JOsK4k{OV0>cSJdD z%i4c3^r;L<_Ulb&oNl>?UIjM&PPa~ll4bX9>ZccnjSKts2qr>1Lc6|e|E}}9vM?gy z{nS7^cO2QL_><}uH50YfYT9bK=TA7*JcxHUD)YL^UC)F&__2DOYO>-_8C7_Na%GsZ z4)Ux`PCM^22}Lcg!FaBgqo`z6*{X`L;;^S-#tQ41TOD?@PD>#wyvSVe0>sV-M^CVe zB@pF|)BOo4zE?S0XO^|;?N6Zlk~#31Wou zH^GGzck(wA6xH5@VCOvd9QGCN^%Y(Z<8D^@{aEri1YS8DX`mM#es-M-NCy&ZnIDDS z*>SKm4Y8i|B`5`Q(lVU-Oe6Y$CL5tZqJn%Zztwn|i%D20^09gR4kQ}sitwNBL?#Kv`Te>ohC!hlivw$w;Ofi=~R zqzN)an}d3Q1#->u8DiR!T8)^dLNzs%dbH5`N%wFD5#^ScUl3cd z)C@Beu93(upM*zH^R1{Psi;$;~S(O z2FyPG#Ogg_EAFFH3drL~st$$%SO{5!CJ019Nm?D$c`?8?KM7hmifSXdurVmjq$-MT z9aYfHm>aM;0Qp!ed$30_T8K!Dg;%XX#&tsvlA8^`1L_>CEkW{$+a{@(#68dYOQ62$ zB*5nx+C@iNr|o3JZpKa}aTAcKUm<~e1`k+~vrEuAa3Esi5jz}p1KF?=xJg>lr5O4- zEY%9|fbttDV2?1mH4aEOBR#1bssi?ip|6Ld%=p^K*X>XWxThFRG(=9fLu0``&(K8J zXKahnhUggjB-0>ogkVLYlca@YAYP9mtjNuoZQ7XBcR{JrTEox*rjo;Z*3qE*lBDeb z3TQ`)=iH=9>nu8g0_!ZWrW1-J<3`_xekf?VgnZ)S0NLuBn?*^Q##rRmH9FV?H4KCZ z#!?TFaotch(uzD*g4TyuMbjJAAn+^IPi!=0A5L();R^x3V2F7DIn#FWPnwZOrUEFlX-wt(x(E! 
zVv=UZ(Qp$O1PQ@ml4d@4kEL1zwFQG|F?0|d1wr933S?9zngmV*0cDm$rgbwAWP5;F zv@p6o4x@?80H{?}g5C~?Mbj|=w|d$cR4}a=eQ`{{$6+xjGOC+l$`@W*OpWkl0Ul;I z$8lSLr==5*8!W(A9VyC;mB7m&D?bd8-DCEh{Xnawr3EIuqxQi9Lz5dNX$`vsZG*!cl(g?dL`jSP>JUnQbP1r}Kp6c|H-il3mIDODsDrr> zq<=RbMU@@^^u9QGh2Vg9G+M%6692*(@aA+WXp5yi*FXF)R3;>kjLVif{ zK>FeZf#JIt;P2G3NLSNWcKWXr&4tYRVGhJ{J-N9R_)oA$j%k1$E+JdKI=`*~)bM7N zPDcgg*dc2_#orL%*tI*qsWCeNx_C^J{q$SGWvkpSkd4@OdL5mFT7S|Hf~#(sb@)Nhf8*B{1*K%ucR2!+P0<= z$X*=DeKC?0WA&lNP{bMF_)5wN(%vXb>8*C4<)pV!fJ{>4n@q0N0#_Xbf8VFJi{wvE z6(_V8`4YI@$wYcT0G(Bfz5oO~zxm(UR#Ke$pnHKbvHZSD#P2%v=E|bqpKZn3P4LBV z&q0A5i++E${W+Bg5K;I4FM7^X*CiGy1@VUT-@g0Oqc6EdSZH)$=h7@^evNX)OQ~y6Oc!7y0Zsg1Vh25WI>2HwXQh@oNwEg+PC_rWsm&lMM_h{)= zTKY432yr{s(=kxws7sI&?cfi+D77FFb{TbCVwZS%GLg>N&F=mT?z!A@{=j8!hC@z< z^6yL~hdu204Y%{%TJLZpbhjSeXO_9g+s4`Z%+Lh1O_d5^syfJfPyd5H$X&_2!VW(3 zl0Oj6Ej?-;*?-AD!9hK#8#vhB%<}iRM`t(eJ{x%W1}ng6C#y$`c;8WuwW(?Bt(iCo zfN>&ekn3}l5M+Og-Jrm6R;g;<@10(A@uNr1PYWL37%zB)$1ixKvbVErJbY5w87v$3 zfA_PU<#WbIsma*G=b(2&cHTN~m7^_GwvAKEi{?8lpVH>5ViRD`1x)*nwp?MmI=8%R zzRwyeZ6-Gbc=&8?3dqgV^DcalM;Du}uvPC3QP_2&!E!#yQ6%hqS#1X;-@br1G~WMW zN}dyM{ZARx)U4O3Rihnnb)*9Yje8FjKF-U@a(}=h68U*NqS3wDvXo!X8O?20y2TsH zVHQ3*)$qLXB7cLU`Xx?!sewh`bdoQ}bRt-EVSX&98J?J1_~a1L0Y4}(U}_27`S%oU z>J%Lt+yf(+iWS^e)}4dB3XbYzQj01vR}jY?KU|-{y`q;K5!%MmKQ?U|?iV<c zvT#s@pMzIfsJ}z5Xw$c}jo1AlhSIy|2iy2`WAVlc(CFIgW=(F>Rc=%DR>s+Pv}WaL zNCXXu5TGd$c1i?eEr|e^2$Nd6s)Rs?xBHkAi9gQN!U&!;P7aFaoHoL$U#8ma?Y_*3 zM8}z0JwhLiGmhe2OdEMzAGka<`fVX&!WA>44 z|9XnHr3lpJcybo?>v7Vk-gzRVHuLE;&7=s_=h!)m`uVukd%bl;NJ*yl6m4}Os0;p0 zlG0}`eem;1N}sn-+TrVxK6mMcTP0CGbNQ!VeURMz>vt+VQbzctlO?7KaF^Fk73iOPwcx(_))YDJ6yVMM69g_3S!nUx=eiOEJmGBIPQ zGzR7xDvgDyLZvxmG?E>%6Iwuz>P;%mT`FNTmVKoj?XmrK7%kO)yB;muKI2Cin@)R- z8@XxRckJ!d6Tjx0era8T+OvMOG%{~rdJWuhFYymXIa+6L-tGHeO{}Q{z({Y}3t}8$ z(JH2n*H92#SRcwc1M$=QV*qNjO7P+BdIEht{Aq__SBJ^xtT%