collect_gmi_FisSal2011: Accelerate appending unique cuts

fs11_01
Alinson S. Xavier 2 months ago
parent 8f3eb8adc4
commit 84acd6b72c

@ -321,8 +321,8 @@ function collect_gmi_FisSal2011(
gapcl_best_history = CircularBuffer{Float64}(gapcl_best_patience) gapcl_best_history = CircularBuffer{Float64}(gapcl_best_patience)
gapcl_curr = 0 gapcl_curr = 0
last_print_time = 0 last_print_time = 0
multipliers_best = nothing multipliers_best = Float64[]
multipliers_curr = nothing multipliers_curr = Float64[]
obj_best = nothing obj_best = nothing
obj_curr = nothing obj_curr = nothing
obj_hist = CircularBuffer{Float64}(100) obj_hist = CircularBuffer{Float64}(100)
@ -426,7 +426,7 @@ function collect_gmi_FisSal2011(
if obj_best === nothing || obj_curr > obj_best if obj_best === nothing || obj_curr > obj_best
log_prefix = '*' log_prefix = '*'
obj_best = obj_curr obj_best = obj_curr
multipliers_best = multipliers_curr copy!(multipliers_best, multipliers_curr)
end end
if round == 1 if round == 1
obj_initial = obj_curr obj_initial = obj_curr
@ -443,7 +443,7 @@ function collect_gmi_FisSal2011(
end end
if count_deterioration >= 10 if count_deterioration >= 10
μ *= 0.5 μ *= 0.5
multipliers_curr = multipliers_best copy!(multipliers_curr, multipliers_best)
count_deterioration = 0 count_deterioration = 0
count_backtrack += 1 count_backtrack += 1
elseif length(obj_hist) >= 100 elseif length(obj_hist) >= 100
@ -501,7 +501,6 @@ function collect_gmi_FisSal2011(
end end
end end
end end
# TODO: Reduce allocations and improve performance
@timeit "Append unique cuts" begin @timeit "Append unique cuts" begin
if round == 1 if round == 1
pool = ConstraintSet( pool = ConstraintSet(
@ -516,19 +515,44 @@ function collect_gmi_FisSal2011(
pool_cut_age = zeros(ncuts_unique) pool_cut_age = zeros(ncuts_unique)
else else
if !isempty(unique_indices) if !isempty(unique_indices)
ncuts_unique = length(unique_indices) @timeit "Append LHS" begin
pool.lhs = [pool.lhs sparse(cuts_s.lhs[unique_indices, :]')] # Transpose cuts matrix for better performance
pool.lb = [pool.lb; cuts_s.lb[unique_indices]] new_cuts_lhs = sparse(cuts_s.lhs[unique_indices, :]')
pool.ub = [pool.ub; cuts_s.ub[unique_indices]]
pool.hash = [pool.hash; cuts_s.hash[unique_indices]] # Resize existing matrix in-place to accommodate new columns
multipliers_curr = [multipliers_curr; zeros(ncuts_unique)] old_cols = pool.lhs.n
multipliers_best = [multipliers_best; zeros(ncuts_unique)] new_cols = new_cuts_lhs.n
pool_cut_age = [pool_cut_age; zeros(ncuts_unique)] total_cols = old_cols + new_cols
resize!(pool.lhs.colptr, total_cols + 1)
# Append new column pointers with offset
old_nnz = nnz(pool.lhs)
for i in 1:new_cols
pool.lhs.colptr[old_cols + i + 1] = old_nnz + new_cuts_lhs.colptr[i + 1]
end
# Expand rowval and nzval arrays
append!(pool.lhs.rowval, new_cuts_lhs.rowval)
append!(pool.lhs.nzval, new_cuts_lhs.nzval)
# Update matrix dimensions
pool.lhs = SparseMatrixCSC(pool.lhs.m, total_cols, pool.lhs.colptr, pool.lhs.rowval, pool.lhs.nzval)
end
@timeit "Append others" begin
ncuts_unique = length(unique_indices)
append!(pool.lb, cuts_s.lb[unique_indices])
append!(pool.ub, cuts_s.ub[unique_indices])
append!(pool.hash, cuts_s.hash[unique_indices])
append!(multipliers_curr, zeros(ncuts_unique))
append!(multipliers_best, zeros(ncuts_unique))
append!(pool_cut_age, zeros(ncuts_unique))
end
end end
end end
end end
end end
@timeit "Prune the pool" begin @timeit "Prune the pool" begin
pool_size_mb = Base.summarysize(pool) / 1024^2 pool_size_mb = Base.summarysize(pool) / 1024^2
while pool_size_mb >= max_pool_size_mb while pool_size_mb >= max_pool_size_mb
@ -624,7 +648,7 @@ function collect_gmi_FisSal2011(
if obj_curr > obj_best if obj_curr > obj_best
log_prefix = '*' log_prefix = '*'
obj_best = obj_curr obj_best = obj_curr
multipliers_best = multipliers_curr copy!(multipliers_best, multipliers_curr)
end end
gapcl_curr = gapcl(obj_curr) gapcl_curr = gapcl(obj_curr)
gapcl_best = gapcl(obj_best) gapcl_best = gapcl(obj_best)

Loading…
Cancel
Save