diff --git a/src/Cuts/tableau/gmi_dual.jl b/src/Cuts/tableau/gmi_dual.jl index 42184a3..cc6bde4 100644 --- a/src/Cuts/tableau/gmi_dual.jl +++ b/src/Cuts/tableau/gmi_dual.jl @@ -321,8 +321,8 @@ function collect_gmi_FisSal2011( gapcl_best_history = CircularBuffer{Float64}(gapcl_best_patience) gapcl_curr = 0 last_print_time = 0 - multipliers_best = nothing - multipliers_curr = nothing + multipliers_best = Float64[] + multipliers_curr = Float64[] obj_best = nothing obj_curr = nothing obj_hist = CircularBuffer{Float64}(100) @@ -426,7 +426,7 @@ function collect_gmi_FisSal2011( if obj_best === nothing || obj_curr > obj_best log_prefix = '*' obj_best = obj_curr - multipliers_best = multipliers_curr + copy!(multipliers_best, multipliers_curr) end if round == 1 obj_initial = obj_curr @@ -443,7 +443,7 @@ function collect_gmi_FisSal2011( end if count_deterioration >= 10 μ *= 0.5 - multipliers_curr = multipliers_best + copy!(multipliers_curr, multipliers_best) count_deterioration = 0 count_backtrack += 1 elseif length(obj_hist) >= 100 @@ -501,7 +501,6 @@ function collect_gmi_FisSal2011( end end end - # TODO: Reduce allocations and improve performance @timeit "Append unique cuts" begin if round == 1 pool = ConstraintSet( @@ -516,19 +515,44 @@ function collect_gmi_FisSal2011( pool_cut_age = zeros(ncuts_unique) else if !isempty(unique_indices) - ncuts_unique = length(unique_indices) - pool.lhs = [pool.lhs sparse(cuts_s.lhs[unique_indices, :]')] - pool.lb = [pool.lb; cuts_s.lb[unique_indices]] - pool.ub = [pool.ub; cuts_s.ub[unique_indices]] - pool.hash = [pool.hash; cuts_s.hash[unique_indices]] - multipliers_curr = [multipliers_curr; zeros(ncuts_unique)] - multipliers_best = [multipliers_best; zeros(ncuts_unique)] - pool_cut_age = [pool_cut_age; zeros(ncuts_unique)] + @timeit "Append LHS" begin + # Transpose cuts matrix for better performance + new_cuts_lhs = sparse(cuts_s.lhs[unique_indices, :]') + + # Resize existing matrix in-place to accommodate new columns + old_cols = 
pool.lhs.n + new_cols = new_cuts_lhs.n + total_cols = old_cols + new_cols + resize!(pool.lhs.colptr, total_cols + 1) + + # Append new column pointers with offset + old_nnz = nnz(pool.lhs) + for i in 1:new_cols + pool.lhs.colptr[old_cols + i + 1] = old_nnz + new_cuts_lhs.colptr[i + 1] + end + + # Expand rowval and nzval arrays + append!(pool.lhs.rowval, new_cuts_lhs.rowval) + append!(pool.lhs.nzval, new_cuts_lhs.nzval) + + # Update matrix dimensions + pool.lhs = SparseMatrixCSC(pool.lhs.m, total_cols, pool.lhs.colptr, pool.lhs.rowval, pool.lhs.nzval) + end + @timeit "Append others" begin + ncuts_unique = length(unique_indices) + append!(pool.lb, cuts_s.lb[unique_indices]) + append!(pool.ub, cuts_s.ub[unique_indices]) + append!(pool.hash, cuts_s.hash[unique_indices]) + append!(multipliers_curr, zeros(ncuts_unique)) + append!(multipliers_best, zeros(ncuts_unique)) + append!(pool_cut_age, zeros(ncuts_unique)) + end end end end end + @timeit "Prune the pool" begin pool_size_mb = Base.summarysize(pool) / 1024^2 while pool_size_mb >= max_pool_size_mb @@ -624,7 +648,7 @@ function collect_gmi_FisSal2011( if obj_curr > obj_best log_prefix = '*' obj_best = obj_curr - multipliers_best = multipliers_curr + copy!(multipliers_best, multipliers_curr) end gapcl_curr = gapcl(obj_curr) gapcl_best = gapcl(obj_best)