From c83267bdb13b8b7e6dbd9bee3ac66b44b2298bc4 Mon Sep 17 00:00:00 2001
From: oyurdakul
Date: Sat, 17 Jun 2023 16:56:37 +0200
Subject: [PATCH 1/5] SUC Benchmarking

---
 Project.toml                                   |   8 +
 docs/src/usage.md                              |  47 +++
 src/UnitCommitment.jl                          |   2 +
 .../methods/ProgressiveHedging/optimize.jl     |   8 +-
 .../ProgressiveHedging/ph_benchmark.jl         | 307 ++++++++++++++++++
 .../methods/ProgressiveHedging/ph_subp.jl      |  41 +++
 .../methods/ProgressiveHedging/structs.jl      |   6 +
 7 files changed, 417 insertions(+), 2 deletions(-)
 create mode 100644 src/solution/methods/ProgressiveHedging/ph_benchmark.jl
 create mode 100644 src/solution/methods/ProgressiveHedging/ph_subp.jl

diff --git a/Project.toml b/Project.toml
index 2a45202..49a99f0 100644
--- a/Project.toml
+++ b/Project.toml
@@ -5,7 +5,10 @@ repo = "https://github.com/ANL-CEEESA/UnitCommitment.jl"
 version = "0.3.0"
 
 [deps]
+CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
+DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
 Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 GZip = "92fee26a-97fe-5a0c-ad85-20a5f3185b63"
@@ -18,11 +21,15 @@ PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
 
 [compat]
+CSV = "0.10"
+DataFrames = "1.5"
 DataStructures = "0.18"
+DelimitedFiles = "1.6"
 Distributions = "0.25"
 GZip = "0.5"
 JSON = "0.21"
@@ -30,5 +37,6 @@ JuMP = "1"
 MathOptInterface = "1"
 MPI = "0.20"
 PackageCompiler = "1"
+Statistics = "1.9"
 julia = "1"
 TimerOutputs = "0.5"
\ No newline at end of file
diff --git a/docs/src/usage.md b/docs/src/usage.md
index d141edc..fd14fab 100644
--- a/docs/src/usage.md
+++ b/docs/src/usage.md
@@ -194,6 +194,53 @@ Each process solves a sub-problem with $\frac{s}{p}$ scenarios, where $s$ is the
 Currently, PH can handle only equiprobable scenarios. Further, `solution(model, ph)` can only handle cases where only one scenario is modeled in each process.
 
+## Benchmarking Solution Methods
+
+The package provides a built-in function for comparing the performance of the supported solution methods (currently the extensive form and progressive hedging) on different benchmark instances. The following example shows how the built-in `run_ph_benchmark` function can be used to carry out such a comparison.
+
+```julia
+using CSV
+using UnitCommitment
+
+# The date of observation used in creating scenarios. It can be used to
+# generate scenarios based on different underlying conditions, such as
+# different seasons or days of the week.
+const DATE = "2017-01-01"
+
+# A dictionary specifying the test systems used for benchmarking. For each
+# test system, SUC models are constructed for each listed number of
+# scenarios. For each test system and scenario count, the creation of the
+# scenarios and the solution of the SUC problem are repeated as many times
+# as "number of runs" specifies.
+const BENCHMARK_CASES = Dict(
+    "case14" => Dict(
+        "scenario numbers" => [4, 6],
+        "number of runs" => 3,
+    ),
+    "case118" => Dict(
+        "scenario numbers" => [2, 4],
+        "number of runs" => 2,
+    ),
+)
+
+# 1. Run the benchmark implementations and retrieve the results
+benchmark_results = UnitCommitment.run_ph_benchmark(BENCHMARK_CASES, date = DATE)
+
+# 2. Create a data frame that reports the benchmark results in detail
+detailed_table = UnitCommitment.fetch_ph_benchmark_detailed_df(benchmark_results)
+
+# 3. Write the detailed table to a CSV file
+CSV.write("detailed_table.csv", detailed_table)
+
+# 4. Create a data frame that summarizes the benchmark results
+summary_table = UnitCommitment.fetch_ph_benchmark_summary_df(benchmark_results)
+
+# 5. Write the summary table to a CSV file
+CSV.write("summary_table.csv", summary_table)
+```
+
 ## Computing Locational Marginal Prices
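For orientation, `run_ph_benchmark` (added in `ph_benchmark.jl` below) returns a nested `OrderedDict` keyed by system name, then scenario count, then run index. The sketch below (illustrative only, not part of the patch) shows one way to walk that structure, assuming `benchmark_results` was produced as in the example above.

```julia
# Illustrative sketch: traverse the nested OrderedDict returned by
# run_ph_benchmark. Keys: system => scenario count => run index => result.
for (system, by_scenario_count) in benchmark_results
    for (scenario_count, by_run) in by_scenario_count
        for (run, result) in by_run
            println(
                "$(system), $(scenario_count) scenarios, run $(run): ",
                "EF obj = $(result["extensive form"]["objective value"]), ",
                "PH obj = $(result["progressive hedging"]["objective value"]), ",
                "similarity = $(result["% similarity"])%",
            )
        end
    end
end
```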
diff --git a/src/UnitCommitment.jl b/src/UnitCommitment.jl
index 40016f7..f320855 100644
--- a/src/UnitCommitment.jl
+++ b/src/UnitCommitment.jl
@@ -53,6 +53,8 @@ include("solution/methods/XavQiuWanThi2019/optimize.jl")
 include("solution/methods/ProgressiveHedging/optimize.jl")
 include("solution/methods/ProgressiveHedging/read.jl")
 include("solution/methods/ProgressiveHedging/solution.jl")
+include("solution/methods/ProgressiveHedging/ph_benchmark.jl")
+# include("solution/methods/ProgressiveHedging/ph_subp.jl")
 include("solution/optimize.jl")
 include("solution/solution.jl")
 include("solution/warmstart.jl")
diff --git a/src/solution/methods/ProgressiveHedging/optimize.jl b/src/solution/methods/ProgressiveHedging/optimize.jl
index 03831d8..0808600 100644
--- a/src/solution/methods/ProgressiveHedging/optimize.jl
+++ b/src/solution/methods/ProgressiveHedging/optimize.jl
@@ -6,7 +6,7 @@ using TimerOutputs
 import JuMP
 const to = TimerOutput()
 
-function optimize!(model::JuMP.Model, method::ProgressiveHedging)::Nothing
+function optimize!(model::JuMP.Model, method::ProgressiveHedging)::PHFinalResult
     mpi = MpiInfo(MPI.COMM_WORLD)
     iterations = PHIterationInfo[]
     consensus_vars = [var for var in all_variables(model) if is_binary(var)]
@@ -57,7 +57,11 @@
             break
         end
     end
-    return
+    return PHFinalResult(
+        last(iterations).global_obj,
+        last(iterations).sp_vals,
+        last(iterations).total_elapsed_time,
+    )
 end
 
 function compute_total_elapsed_time(
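With the return-type change above, `UnitCommitment.optimize!(model, ph)` now hands the final PH objective, the consensus (binary) variable values, and the total wall-clock time back to the caller; `ph_subp.jl` below relies on this. A minimal sketch of consuming the result, illustrative only, assuming a model and `ph` set up as in the PH section of the docs and run under MPI:

```julia
# Illustrative sketch: consume the PHFinalResult returned by the PH method.
ph = UnitCommitment.ProgressiveHedging()
result = UnitCommitment.optimize!(model, ph)
println("objective = ", result.obj)
println("wall-clock time (s) = ", result.wallclock_time)
println("consensus variables = ", length(result.vals))
```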
diff --git a/src/solution/methods/ProgressiveHedging/ph_benchmark.jl b/src/solution/methods/ProgressiveHedging/ph_benchmark.jl
new file mode 100644
index 0000000..271638f

using Base: Order
using DataStructures
using DataFrames, DelimitedFiles, Statistics
using MPI, Printf
using Glob, Gurobi, JuMP

const TEMPDIR = tempdir()
const INPUT_PATH = "$(TEMPDIR)/input_files/new_scenarios"
const FILENAME = joinpath(
    dirname(pathof(UnitCommitment)),
    "solution/methods/ProgressiveHedging/ph_subp.jl",
)

function _scenario_to_dict(
    sc::UnitCommitment.UnitCommitmentScenario,
)::OrderedDict
    json = OrderedDict()
    json["Parameters"] = OrderedDict()
    json["Parameters"]["Scenario name"] = sc.name
    json["Parameters"]["Scenario weight"] = sc.probability
    json["Parameters"]["Version"] = "0.3"
    json["Parameters"]["Time (h)"] = sc.time
    json["Parameters"]["Power balance penalty (\$/MW)"] =
        sc.power_balance_penalty
    if length(sc.reserves) > 0
        json["Reserves"] = OrderedDict()
        for r in sc.reserves
            r_dict = json["Reserves"][r.name] = OrderedDict()
            r_dict["Type"] = r.type
            r_dict["Amount (MW)"] = r.amount
            r_dict["Shortfall penalty (\$/MW)"] = r.shortfall_penalty
        end
    end
    json["Buses"] = OrderedDict()
    total_load = sum([b.load for b in sc.buses])
    json["Buses"]["b1"] = OrderedDict()
    json["Buses"]["b1"]["Load (MW)"] = total_load
    json["Generators"] = OrderedDict()
    for g in sc.thermal_units
        g_dict = json["Generators"][g.name] = OrderedDict()
        g_dict["Bus"] = "b1"
        g_dict["Ramp up limit (MW)"] = g.ramp_up_limit
        g_dict["Ramp down limit (MW)"] = g.ramp_down_limit
        g_dict["Startup limit (MW)"] = g.startup_limit
        g_dict["Shutdown limit (MW)"] = g.shutdown_limit
        g_dict["Minimum uptime (h)"] = g.min_uptime
        g_dict["Minimum downtime (h)"] = g.min_downtime
        g_dict["Initial status (h)"] = g.initial_status
        g_dict["Initial power (MW)"] = g.initial_power
        g_dict["Must run?"] = g.must_run
        g_dict["Startup delays (h)"] =
            [st_c.delay for st_c in g.startup_categories]
        g_dict["Startup costs (\$)"] =
            [st_c.cost for st_c in g.startup_categories]
        K = length(g.cost_segments) + 1
        T = length(g.min_power)
        curve_mw = Matrix{Float64}(undef, T, K)
        curve_cost = Matrix{Float64}(undef, T, K)
        curve_mw[:, 1] = g.min_power
        curve_cost[:, 1] = g.min_power_cost
        for k in 1:K-1
            curve_mw[:, k+1] = curve_mw[:, k] + g.cost_segments[k].mw
            curve_cost[:, k+1] =
                curve_cost[:, k] +
                (g.cost_segments[k].cost .* g.cost_segments[k].mw)
        end
        g_dict["Production cost curve (MW)"] = curve_mw
        g_dict["Production cost curve (\$)"] = curve_cost
        length(g.reserves) == 0 ||
            (g_dict["Reserve eligibility"] = [r.name for r in g.reserves])
    end
    return json
end

function _create_new_scenarios(
    s_num::Int,
    r::Int,
    system::String,
    date::String,
)::String
    path = "$(INPUT_PATH)/$(system)/snum_$(s_num)/run_$r"
    benchmark = "matpower/$(system)/$(date)"
    mkpath(path)
    for sn in 1:s_num
        sc = UnitCommitment.read_benchmark(benchmark).scenarios[1]
        randomize!(sc, UnitCommitment.XavQiuAhm2021.Randomization())
        sc.name = "s$(sn)"
        sc.probability = 1 / s_num
        sc_dict = _scenario_to_dict(sc)
        write("$(path)/s$(sn).json", sc_dict)
    end
    return path
end

function _solve_instance(scenarios::Vector{String})::AbstractDict
    instance = UnitCommitment.read(scenarios)
    model = UnitCommitment.build_model(
        instance = instance,
        optimizer = Gurobi.Optimizer,
        formulation = Formulation(),
    )
    set_optimizer_attribute(model, "Threads", length(instance.scenarios))
    time_stat = @timed UnitCommitment.optimize!(model)
    extensive_solution = Dict(
        "objective value" => objective_value(model),
        "binary values" =>
            [value(var) for var in all_variables(model) if is_binary(var)],
        "solution statistics" => time_stat,
    )
    return extensive_solution
end

function _write_setup(system::String, snum::Int, r::Int)
    open("$(INPUT_PATH)/setup.txt", "w") do file
        return Base.write(file, @sprintf("%s,%d,%d", system, snum, r))
    end
end

function _retrieve_results(
    extensive_solution::Dict,
    solution_path::String,
)::OrderedDict
    result = OrderedDict()
    result["extensive form"] = OrderedDict()
    result["extensive form"]["objective value"] =
        extensive_solution["objective value"]
    result["extensive form"]["wallclock time"] =
        extensive_solution["solution statistics"].time
    result["progressive hedging"] = OrderedDict()
    global_obj = open("$(solution_path)/1/global_obj.txt") do file
        return split(Base.read(file, String))[1]
    end
    wallclock_time = open("$(solution_path)/1/wallclock_time.txt") do file
        return split(Base.read(file, String))[1]
    end
    result["progressive hedging"]["objective value"] =
        parse(Float64, global_obj)
    result["progressive hedging"]["wallclock time"] =
        parse(Float64, wallclock_time)
    extensive_binary_vals = extensive_solution["binary values"]
    ph_binary_vals =
        vec(readdlm("$(solution_path)/1/binary_vals.csv", ',', Float64))
    result["% similarity"] =
        (
            1 - (
                sum(abs.(extensive_binary_vals - ph_binary_vals)) /
                length(extensive_binary_vals)
            )
        ) * 100
    return result
end
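# Editorial note on the "% similarity" metric computed in _retrieve_results:
# both vectors are binary, so the mean absolute difference is exactly the
# fraction of commitment decisions on which the two methods disagree.
# Toy example (values invented for illustration):
#     ef_vals = [1.0, 0.0, 1.0, 1.0]
#     ph_vals = [1.0, 0.0, 0.0, 1.0]
#     (1 - sum(abs.(ef_vals - ph_vals)) / length(ef_vals)) * 100  # == 75.0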
function fetch_ph_benchmark_summary_df(
    solution_stat::OrderedDict,
)::AbstractDataFrame
    extensive_obj = Vector{Float64}(undef, 0)
    ph_obj = Vector{Float64}(undef, 0)
    extensive_time = Vector{Float64}(undef, 0)
    ph_time = Vector{Float64}(undef, 0)
    setup = Vector{String}(undef, 0)
    similarity = Vector{Float64}(undef, 0)
    for (system, system_result) in solution_stat
        for (snum, snum_result) in system_result
            push!(setup, @sprintf("%s (%s)", system, snum))
            push!(
                extensive_obj,
                round(
                    mean([
                        snum_result[rep]["extensive form"]["objective value"]
                        for rep in keys(snum_result)
                    ]),
                    digits = 3,
                ),
            )
            push!(
                ph_obj,
                round(
                    mean([
                        snum_result[rep]["progressive hedging"]["objective value"]
                        for rep in keys(snum_result)
                    ]),
                    digits = 3,
                ),
            )
            push!(
                extensive_time,
                round(
                    mean([
                        snum_result[rep]["extensive form"]["wallclock time"] for
                        rep in keys(snum_result)
                    ]),
                    digits = 3,
                ),
            )
            push!(
                ph_time,
                round(
                    mean([
                        snum_result[rep]["progressive hedging"]["wallclock time"]
                        for rep in keys(snum_result)
                    ]),
                    digits = 3,
                ),
            )
            push!(
                similarity,
                round(
                    mean([
                        snum_result[rep]["% similarity"] for
                        rep in keys(snum_result)
                    ]),
                    digits = 3,
                ),
            )
        end
    end
    df = DataFrame(
        hcat(setup, extensive_obj, ph_obj, extensive_time, ph_time, similarity),
        [
            "system (scenario number)",
            "objective value-extensive",
            "objective value-ph",
            "wallclock time-extensive",
            "wallclock time-ph",
            "% similarity",
        ],
    )
    return df
end

function fetch_ph_benchmark_detailed_df(
    solution_stat::OrderedDict,
)::AbstractDataFrame
    systems = Vector{String}(undef, 0)
    scenario_numbers = Vector{Int}(undef, 0)
    methods = Vector{String}(undef, 0)
    run_indices = Vector{Int}(undef, 0)
    objective_values = Vector{Float64}(undef, 0)
    wallclock_times = Vector{Float64}(undef, 0)
    similarities = Vector{Float64}(undef, 0)
    for (system, system_result) in solution_stat
        for (snum, snum_result) in system_result
            for (run, run_result) in snum_result
                for method in ["extensive form", "progressive hedging"]
                    push!(systems, system)
                    push!(scenario_numbers, snum)
                    push!(methods, method)
                    push!(run_indices, run)
                    push!(
                        objective_values,
                        round(
                            run_result[method]["objective value"],
                            digits = 3,
                        ),
                    )
                    push!(
                        wallclock_times,
                        round(run_result[method]["wallclock time"], digits = 3),
                    )
                    push!(
                        similarities,
                        round(run_result["% similarity"], digits = 3),
                    )
                end
            end
        end
    end
    df = DataFrame(
        hcat(
            systems,
            scenario_numbers,
            methods,
            run_indices,
            objective_values,
            wallclock_times,
            similarities,
        ),
        [
            "system",
            "number of scenarios",
            "method",
            "run-x",
            "objective value",
            "wallclock time",
            "% similarity",
        ],
    )
    return df
end

function run_ph_benchmark(
    cases::AbstractDict;
    date::String = "2017-01-01",
)::OrderedDict
    mkpath(INPUT_PATH)
    solution_stat = OrderedDict()
    for system in keys(cases)
        solution_stat[system] = OrderedDict()
        for s_num in cases[system]["scenario numbers"]
            solution_stat[system][s_num] = OrderedDict()
            for r in 1:cases[system]["number of runs"]
                solution_stat[system][s_num][r] = OrderedDict()
                scenario_path = _create_new_scenarios(s_num, r, system, date)
                solution_path = "$(TEMPDIR)/output_files/$(system)/snum_$(s_num)/run_$(r)"
                mkpath(solution_path)
                extensive_solution =
                    _solve_instance(glob("*.json", scenario_path))
                _write_setup(system, s_num, r)
                mpiexec(
                    exe -> run(`$exe -n $s_num $(Base.julia_cmd()) $FILENAME`),
                )
                result = _retrieve_results(extensive_solution, solution_path)
                solution_stat[system][s_num][r] = result
            end
        end
    end
    return solution_stat
end
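`run_ph_benchmark` solves the extensive form in-process but delegates the PH solve to the standalone script `ph_subp.jl` (next diff), launching one MPI rank per scenario via the `mpiexec` helper from MPI.jl. A minimal sketch of that handoff in isolation, illustrative only; the scenario count is an assumption and the script path mirrors the `FILENAME` constant defined above:

```julia
# Illustrative sketch: launch ph_subp.jl under MPI, one rank per scenario,
# mirroring the mpiexec call inside run_ph_benchmark.
using MPI, UnitCommitment
n_scenarios = 4  # assumed for illustration
script = joinpath(
    dirname(pathof(UnitCommitment)),
    "solution/methods/ProgressiveHedging/ph_subp.jl",
)
mpiexec(exe -> run(`$exe -n $n_scenarios $(Base.julia_cmd()) $script`))
```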
diff --git a/src/solution/methods/ProgressiveHedging/ph_subp.jl b/src/solution/methods/ProgressiveHedging/ph_subp.jl
new file mode 100644
index 0000000..7911103

using Gurobi, JuMP, MPI, Glob, DelimitedFiles, Printf, UnitCommitment

const TEMPDIR = tempdir()
input_path = "$(TEMPDIR)/input_files/new_scenarios"

function _write_results(
    final_result::UnitCommitment.PHFinalResult,
    solution_path::String,
)::Nothing
    rank = UnitCommitment.MpiInfo(MPI.COMM_WORLD).rank
    rank_solution_path = "$(solution_path)/$(rank)"
    mkpath(rank_solution_path)
    if rank == 1
        open("$(rank_solution_path)/global_obj.txt", "w") do file
            return Base.write(file, @sprintf("%s", final_result.obj))
        end
        open("$(rank_solution_path)/wallclock_time.txt", "w") do file
            return Base.write(file, @sprintf("%s", final_result.wallclock_time))
        end
        writedlm(
            "$(rank_solution_path)/binary_vals.csv",
            final_result.vals,
            ',',
        )
    end
    return nothing
end

system, s_num, r = split(Base.read("$(input_path)/setup.txt", String), ",")
new_scenario_path = "$(input_path)/$(system)/snum_$(s_num)/run_$r/"
solution_path = "$(TEMPDIR)/output_files/$(system)/snum_$(s_num)/run_$r"
MPI.Init()
ph = UnitCommitment.ProgressiveHedging()
instance = UnitCommitment.read(glob("*.json", new_scenario_path), ph)
model = UnitCommitment.build_model(
    instance = instance,
    optimizer = Gurobi.Optimizer,
)
final_result = UnitCommitment.optimize!(model, ph)
_write_results(final_result, solution_path)
MPI.Finalize()
diff --git a/src/solution/methods/ProgressiveHedging/structs.jl b/src/solution/methods/ProgressiveHedging/structs.jl
index 46f68f2..73492a6 100644
--- a/src/solution/methods/ProgressiveHedging/structs.jl
+++ b/src/solution/methods/ProgressiveHedging/structs.jl
@@ -58,6 +58,12 @@ Base.@kwdef mutable struct PHSubProblemParams
     target::Array{Float64,1}
 end
 
+Base.@kwdef mutable struct PHFinalResult
+    obj::Float64
+    vals::Array{Float64,1}
+    wallclock_time::Float64
+end
+
 struct MpiInfo
     comm::Any
     rank::Int

From 885ea73a27920e48bbb29cacb3c821ea2c68d74e Mon Sep 17 00:00:00 2001
From: oyurdakul
Date: Sat, 17 Jun 2023 16:58:24 +0200
Subject: [PATCH 2/5] SUC Benchmarking

---
 src/UnitCommitment.jl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/UnitCommitment.jl b/src/UnitCommitment.jl
index f320855..d2fe453 100644
--- a/src/UnitCommitment.jl
+++ b/src/UnitCommitment.jl
@@ -54,7 +54,6 @@ include("solution/methods/ProgressiveHedging/optimize.jl")
 include("solution/methods/ProgressiveHedging/read.jl")
 include("solution/methods/ProgressiveHedging/solution.jl")
 include("solution/methods/ProgressiveHedging/ph_benchmark.jl")
-# include("solution/methods/ProgressiveHedging/ph_subp.jl")
 include("solution/optimize.jl")
 include("solution/solution.jl")
 include("solution/warmstart.jl")
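Patches 1 and 2 complete the functional changes; patch 2 drops the commented-out include because `ph_subp.jl` is a standalone MPI script rather than module code. The remaining patches only adjust dependencies. As a quick recap of the user-facing API, the two table builders yield the following columns (an illustrative sketch; names exactly as defined in `ph_benchmark.jl` above):

```julia
# Illustrative sketch: column layouts of the two benchmark tables.
using DataFrames
summary_table = UnitCommitment.fetch_ph_benchmark_summary_df(benchmark_results)
println(names(summary_table))
# ["system (scenario number)", "objective value-extensive", "objective value-ph",
#  "wallclock time-extensive", "wallclock time-ph", "% similarity"]

detailed_table = UnitCommitment.fetch_ph_benchmark_detailed_df(benchmark_results)
println(names(detailed_table))
# ["system", "number of scenarios", "method", "run-x", "objective value",
#  "wallclock time", "% similarity"]
```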
From 580f0529bf074454df4ec46afbd736614cd4cbde Mon Sep 17 00:00:00 2001
From: oyurdakul
Date: Sat, 17 Jun 2023 17:31:26 +0200
Subject: [PATCH 3/5] SUC Benchmarking

---
 Project.toml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Project.toml b/Project.toml
index 49a99f0..4debaa0 100644
--- a/Project.toml
+++ b/Project.toml
@@ -21,7 +21,6 @@ PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
-Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
 
@@ -37,6 +36,5 @@ JuMP = "1"
 MathOptInterface = "1"
 MPI = "0.20"
 PackageCompiler = "1"
-Statistics = "1.9"
 julia = "1"
 TimerOutputs = "0.5"
\ No newline at end of file

From 8daed74f740bd7449b5b5ef0f79124a0ae617fcf Mon Sep 17 00:00:00 2001
From: oyurdakul
Date: Sat, 17 Jun 2023 17:41:27 +0200
Subject: [PATCH 4/5] SUC Benchmarking

---
 Project.toml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Project.toml b/Project.toml
index 4debaa0..15299ec 100644
--- a/Project.toml
+++ b/Project.toml
@@ -11,6 +11,7 @@ DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
 DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
 Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
 GZip = "92fee26a-97fe-5a0c-ad85-20a5f3185b63"
 JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
@@ -21,6 +22,7 @@ PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
 
@@ -30,11 +32,13 @@ DataFrames = "1.5"
 DataStructures = "0.18"
 DelimitedFiles = "1.6"
 Distributions = "0.25"
+Glob = "1.3"
 GZip = "0.5"
 JSON = "0.21"
 JuMP = "1"
 MathOptInterface = "1"
 MPI = "0.20"
 PackageCompiler = "1"
+Statistics = "1.6"
 julia = "1"
 TimerOutputs = "0.5"
\ No newline at end of file

From 00619738f4a5c1d9f06f3cdcc2f148ae78f3787b Mon Sep 17 00:00:00 2001
From: oyurdakul
Date: Sat, 17 Jun 2023 17:47:55 +0200
Subject: [PATCH 5/5] SUC Benchmarking

---
 Project.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Project.toml b/Project.toml
index 15299ec..420592d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -12,6 +12,7 @@ DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
 Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
+Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b"
 GZip = "92fee26a-97fe-5a0c-ad85-20a5f3185b63"
 JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
@@ -33,6 +34,7 @@ DataStructures = "0.18"
 DelimitedFiles = "1.6"
 Distributions = "0.25"
 Glob = "1.3"
+Gurobi = "1.0"
 GZip = "0.5"
 JSON = "0.21"
 JuMP = "1"
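Taken together, patches 3 to 5 settle the dependency set: the `Statistics` compat bound is relaxed from 1.9 to 1.6 (the stdlib version tracks Julia, so 1.9 would have excluded older Julia 1.x releases), and `Glob` and `Gurobi` become direct dependencies. A quick, illustrative way to check that the edited project resolves after applying the series (assumes a local checkout):

```julia
# Illustrative sketch: verify the new direct dependencies resolve in a local
# UnitCommitment.jl checkout after applying the patch series.
using Pkg
Pkg.activate(".")  # run from the repository root
Pkg.resolve()
Pkg.status()       # CSV, DataFrames, Glob, Gurobi, and Statistics should be listed
```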