pull/33/merge
Ogün Yurdakul 2 years ago committed by GitHub
commit 5066d44bdc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -5,9 +5,14 @@ repo = "https://github.com/ANL-CEEESA/UnitCommitment.jl"
version = "0.3.0" version = "0.3.0"
[deps] [deps]
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b" Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b"
GZip = "92fee26a-97fe-5a0c-ad85-20a5f3185b63" GZip = "92fee26a-97fe-5a0c-ad85-20a5f3185b63"
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572" JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
@ -18,17 +23,24 @@ PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
[compat] [compat]
CSV = "0.10"
DataFrames = "1.5"
DataStructures = "0.18" DataStructures = "0.18"
DelimitedFiles = "1.6"
Distributions = "0.25" Distributions = "0.25"
Glob = "1.3"
Gurobi = "1.0"
GZip = "0.5" GZip = "0.5"
JSON = "0.21" JSON = "0.21"
JuMP = "1" JuMP = "1"
MathOptInterface = "1" MathOptInterface = "1"
MPI = "0.20" MPI = "0.20"
PackageCompiler = "1" PackageCompiler = "1"
Statistics = "1.6"
julia = "1" julia = "1"
TimerOutputs = "0.5" TimerOutputs = "0.5"

@ -205,6 +205,53 @@ Each process solves a sub-problem with $\frac{s}{p}$ scenarios, where $s$ is the
Currently, PH can handle only equiprobable scenarios. Further, `solution(model, ph)` can only handle cases where only one scenario is modeled in each process. Currently, PH can handle only equiprobable scenarios. Further, `solution(model, ph)` can only handle cases where only one scenario is modeled in each process.
## Benchmarking Solution Methods
The package has a built-in function that serves to compare the performance of the supported solution methods (currently including the extensive form and progressive hedging) in solving different benchmark instances. The following example shows how the built-in `run_ph_benchmark` function can be used to evaluate the performance of the supported solution methods.
```julia
using CSV
using UnitCommitment
# The date of observation used in creating scenarios.
# Can be used to generate scenarios based on different underlying conditions,
# such as different seasons or days of the week.
const DATE = "2017-01-01"
# A dictionary specifying the test systems used for benchmarking.
# For each test system, SUC models with several scenario numbers can be constructed.
# For each test system and scenario number, the creation of the scenarios and the
# solution of the SUC problem can be repeated multiple times, specified by "number of runs".
const BENCHMARK_CASES = Dict(
"case14" =>
Dict(
"scenario numbers" => [4, 6],
"number of runs" => 3
),
"case118" =>
Dict(
"scenario numbers" => [2, 4],
"number of runs" => 2
),
)
# 1. Run benchmark implementations, retrieve the results
benchmark_results = UnitCommitment.run_ph_benchmark(BENCHMARK_CASES, date = DATE)
# 2. Create a data frame that reports the benchmark results in detail
detailed_table = UnitCommitment.fetch_ph_benchmark_detailed_df(benchmark_results)
# 3. Write the data frame to a CSV file
CSV.write("detailed_table.csv", detailed_table)
# 4. Create a data frame that summarizes the benchmark results
summary_table = UnitCommitment.fetch_ph_benchmark_summary_df(benchmark_results)
# 5. Write the data frame to a CSV file
CSV.write("summary_table.csv", summary_table)
```
## Computing Locational Marginal Prices ## Computing Locational Marginal Prices

@ -57,6 +57,7 @@ include("solution/methods/TimeDecomposition/optimize.jl")
include("solution/methods/ProgressiveHedging/optimize.jl") include("solution/methods/ProgressiveHedging/optimize.jl")
include("solution/methods/ProgressiveHedging/read.jl") include("solution/methods/ProgressiveHedging/read.jl")
include("solution/methods/ProgressiveHedging/solution.jl") include("solution/methods/ProgressiveHedging/solution.jl")
include("solution/methods/ProgressiveHedging/ph_benchmark.jl")
include("solution/optimize.jl") include("solution/optimize.jl")
include("solution/solution.jl") include("solution/solution.jl")
include("solution/warmstart.jl") include("solution/warmstart.jl")

@ -6,7 +6,7 @@ using TimerOutputs
import JuMP import JuMP
const to = TimerOutput() const to = TimerOutput()
function optimize!(model::JuMP.Model, method::ProgressiveHedging)::Nothing function optimize!(model::JuMP.Model, method::ProgressiveHedging)::PHFinalResult
mpi = MpiInfo(MPI.COMM_WORLD) mpi = MpiInfo(MPI.COMM_WORLD)
iterations = PHIterationInfo[] iterations = PHIterationInfo[]
consensus_vars = [var for var in all_variables(model) if is_binary(var)] consensus_vars = [var for var in all_variables(model) if is_binary(var)]
@ -57,7 +57,11 @@ function optimize!(model::JuMP.Model, method::ProgressiveHedging)::Nothing
break break
end end
end end
return return PHFinalResult(
last(iterations).global_obj,
last(iterations).sp_vals,
last(iterations).total_elapsed_time,
)
end end
function compute_total_elapsed_time( function compute_total_elapsed_time(

@ -0,0 +1,307 @@
using Base: Order
using DataStructures
using DataFrames, DelimitedFiles, Statistics
using MPI, Printf
using Glob, Gurobi, JuMP
# Scratch locations shared by the benchmark driver and the MPI sub-process script.
const TEMPDIR = tempdir()
# Directory where the randomized scenario JSON files and setup.txt are written.
const INPUT_PATH = "$(TEMPDIR)/input_files/new_scenarios"
# Absolute path of the sub-problem script launched via `mpiexec`.
# NOTE: the plain string concatenation works because Julia's `normpath`
# (called by `abspath`) keeps a trailing path separator for dir-paths such
# as "…/UnitCommitment.jl/..".
const FILENAME = "$(abspath(joinpath(pathof(UnitCommitment), "..")))solution/methods/ProgressiveHedging/ph_subp.jl"
"""
    _scenario_to_dict(sc::UnitCommitment.UnitCommitmentScenario)::OrderedDict

Convert a scenario into a dictionary following the UnitCommitment.jl input
format (version "0.3"), suitable for serialization to JSON. All buses are
collapsed into a single bus `"b1"` carrying the total system load, and every
thermal unit is re-attached to it, so the network structure is discarded.
"""
function _scenario_to_dict(
    sc::UnitCommitment.UnitCommitmentScenario,
)::OrderedDict
    json = OrderedDict()
    json["Parameters"] = OrderedDict()
    json["Parameters"]["Scenario name"] = sc.name
    json["Parameters"]["Scenario weight"] = sc.probability
    json["Parameters"]["Version"] = "0.3"
    json["Parameters"]["Time (h)"] = sc.time
    json["Parameters"]["Power balance penalty (\$/MW)"] =
        sc.power_balance_penalty
    # The "Reserves" section is emitted only when the scenario defines any.
    if length(sc.reserves) > 0
        json["Reserves"] = OrderedDict()
        for r in sc.reserves
            r_dict = json["Reserves"][r.name] = OrderedDict()
            r_dict["Type"] = r.type
            r_dict["Amount (MW)"] = r.amount
            r_dict["Shortfall penalty (\$/MW)"] = r.shortfall_penalty
        end
    end
    # Single-bus aggregation: the loads of all buses are summed onto "b1".
    json["Buses"] = OrderedDict()
    total_load = sum([b.load for b in sc.buses])
    json["Buses"]["b1"] = OrderedDict()
    json["Buses"]["b1"]["Load (MW)"] = total_load
    json["Generators"] = OrderedDict()
    for g in sc.thermal_units
        g_dict = json["Generators"][g.name] = OrderedDict()
        # All generators are reassigned to the aggregated bus.
        g_dict["Bus"] = "b1"
        g_dict["Ramp up limit (MW)"] = g.ramp_up_limit
        g_dict["Ramp down limit (MW)"] = g.ramp_down_limit
        g_dict["Startup limit (MW)"] = g.startup_limit
        g_dict["Shutdown limit (MW)"] = g.shutdown_limit
        g_dict["Minimum uptime (h)"] = g.min_uptime
        g_dict["Minimum downtime (h)"] = g.min_downtime
        g_dict["Initial status (h)"] = g.initial_status
        g_dict["Initial power (MW)"] = g.initial_power
        g_dict["Must run?"] = g.must_run
        g_dict["Startup delays (h)"] =
            [st_c.delay for st_c in g.startup_categories]
        g_dict["Startup costs (\$)"] =
            [st_c.cost for st_c in g.startup_categories]
        # Rebuild the piecewise-linear production cost curve as cumulative
        # (MW, cost) points per time step: column 1 is the minimum-power
        # point; each following column adds one cost segment's width/cost.
        K = length(g.cost_segments) + 1
        T = length(g.min_power)
        curve_mw = Matrix{Float64}(undef, T, K)
        curve_cost = Matrix{Float64}(undef, T, K)
        curve_mw[:, 1] = g.min_power
        curve_cost[:, 1] = g.min_power_cost
        for k in 1:K-1
            curve_mw[:, k+1] = curve_mw[:, k] + g.cost_segments[k].mw
            curve_cost[:, k+1] =
                curve_cost[:, k] +
                (g.cost_segments[k].cost .* g.cost_segments[k].mw)
        end
        g_dict["Production cost curve (MW)"] = (curve_mw)
        g_dict["Production cost curve (\$)"] = (curve_cost)
        # Reserve eligibility is listed only when the unit provides reserves.
        length(g.reserves) == 0 ||
            (g_dict["Reserve eligibility"] = [r.name for r in g.reserves])
    end
    return json
end
"""
    _create_new_scenarios(s_num, r, system, date)::String

Generate `s_num` randomized, equiprobable scenario files for `system`,
derived from the matpower benchmark instance for `date`, under
`INPUT_PATH/<system>/snum_<s_num>/run_<r>`, and return that directory path.
"""
function _create_new_scenarios(
    s_num::Int,
    r::Int,
    system::String,
    date::String,
)::String
    path = "$(INPUT_PATH)/$(system)/snum_$(s_num)/run_$r"
    benchmark = "matpower/$(system)/$(date)"
    mkpath(path)
    for sn in 1:s_num
        # NOTE(review): the benchmark instance is re-read on every iteration;
        # a fresh copy is needed because `randomize!` mutates the scenario
        # in place, but parsing once and deep-copying would likely be cheaper.
        sc = UnitCommitment.read_benchmark(benchmark).scenarios[1]
        randomize!(sc, UnitCommitment.XavQiuAhm2021.Randomization())
        sc.name = "s$(sn)"
        # Scenarios are equiprobable, as PH currently requires.
        sc.probability = 1 / s_num
        sc_dict = _scenario_to_dict(sc)
        # NOTE(review): relies on a `write` method accepting an OrderedDict
        # (presumably a JSON-serializing method defined elsewhere) — confirm.
        write("$(path)/s$(sn).json", sc_dict)
    end
    return path
end
"""
    _solve_instance(scenarios::Vector{String})::AbstractDict

Solve the extensive form of the stochastic problem defined by the given
scenario files with Gurobi, and return a dictionary holding the objective
value, the values of all binary variables, and the `@timed` statistics.
"""
function _solve_instance(scenarios::Vector{String})::AbstractDict
    uc_instance = UnitCommitment.read(scenarios)
    uc_model = UnitCommitment.build_model(
        instance = uc_instance,
        optimizer = Gurobi.Optimizer,
        formulation = Formulation(),
    )
    # Give the solver one thread per scenario.
    set_optimizer_attribute(uc_model, "Threads", length(uc_instance.scenarios))
    stats = @timed UnitCommitment.optimize!(uc_model)
    binary_values =
        [value(v) for v in all_variables(uc_model) if is_binary(v)]
    return Dict(
        "objective value" => objective_value(uc_model),
        "binary values" => binary_values,
        "solution statistics" => stats,
    )
end
"""
    _write_setup(system, snum, r)

Persist the current benchmark configuration — system name, scenario count,
and run index, comma-separated — to `setup.txt`, where the separately
launched MPI sub-process script picks it up.
"""
function _write_setup(system::String, snum::Int, r::Int)
    setup_line = string(system, ",", snum, ",", r)
    open("$(INPUT_PATH)/setup.txt", "w") do file
        return Base.write(file, setup_line)
    end
end
"""
    _retrieve_results(extensive_solution, solution_path)::OrderedDict

Combine the in-process extensive-form solution with the progressive-hedging
output files written by MPI rank 1 under `solution_path/1/`, returning a
dictionary with both methods' objective values and wallclock times plus the
`% similarity` between their binary solutions.
"""
function _retrieve_results(
    extensive_solution::Dict,
    solution_path::String,
)::OrderedDict
    result = OrderedDict()
    result["extensive form"] = OrderedDict()
    result["extensive form"]["objective value"] =
        extensive_solution["objective value"]
    result["extensive form"]["wallclock time"] =
        extensive_solution["solution statistics"].time
    result["progressive hedging"] = OrderedDict()
    # Read the first whitespace-delimited token of each rank-1 output file.
    # Local bindings replace the previous `global` assignments inside the
    # `open ... do` closures, which leaked untyped global state.
    global_obj = open("$(solution_path)/1/global_obj.txt") do glob_obj
        return split(Base.read(glob_obj, String))[1]
    end
    wallclock_time = open("$(solution_path)/1/wallclock_time.txt") do wallcl_time
        return split(Base.read(wallcl_time, String))[1]
    end
    result["progressive hedging"]["objective value"] =
        parse(Float64, global_obj)
    result["progressive hedging"]["wallclock time"] =
        parse(Float64, wallclock_time)
    extensive_binary_vals = extensive_solution["binary values"]
    ph_binary_vals =
        vec(readdlm("$(solution_path)/1/binary_vals.csv", ',', Float64))
    # 100 * (1 - mean absolute difference) between the two binary solutions.
    result["% similarity"] =
        (
            1 - (
                sum(abs.(extensive_binary_vals - ph_binary_vals)) /
                length(extensive_binary_vals)
            )
        ) * 100
    return result
end
# Mean of `f(run_result)` over all runs in `snum_result`, rounded to 3
# digits. Factors out the aggregation previously repeated for each column.
function _ph_benchmark_mean(f::Function, snum_result::AbstractDict)::Float64
    return round(
        mean([f(snum_result[rep]) for rep in keys(snum_result)]),
        digits = 3,
    )
end

"""
    fetch_ph_benchmark_summary_df(solution_stat::OrderedDict)::AbstractDataFrame

Build a summary data frame from `run_ph_benchmark` output with one row per
(system, scenario number): the objective values and wallclock times of the
extensive form and of progressive hedging, and the % similarity of their
binary solutions, each averaged over all runs and rounded to 3 digits.
"""
function fetch_ph_benchmark_summary_df(
    solution_stat::OrderedDict,
)::AbstractDataFrame
    extensive_obj = Vector{Float64}(undef, 0)
    ph_obj = Vector{Float64}(undef, 0)
    extensive_time = Vector{Float64}(undef, 0)
    ph_time = Vector{Float64}(undef, 0)
    setup = Vector{String}(undef, 0)
    similarity = Vector{Float64}(undef, 0)
    for (system, system_result) in solution_stat
        for (snum, snum_result) in system_result
            push!(setup, @sprintf("%s (%s)", system, snum))
            push!(
                extensive_obj,
                _ph_benchmark_mean(snum_result) do res
                    res["extensive form"]["objective value"]
                end,
            )
            push!(
                ph_obj,
                _ph_benchmark_mean(snum_result) do res
                    res["progressive hedging"]["objective value"]
                end,
            )
            push!(
                extensive_time,
                _ph_benchmark_mean(snum_result) do res
                    res["extensive form"]["wallclock time"]
                end,
            )
            push!(
                ph_time,
                _ph_benchmark_mean(snum_result) do res
                    res["progressive hedging"]["wallclock time"]
                end,
            )
            push!(
                similarity,
                _ph_benchmark_mean(snum_result) do res
                    res["% similarity"]
                end,
            )
        end
    end
    df = DataFrame(
        hcat(setup, extensive_obj, ph_obj, extensive_time, ph_time, similarity),
        [
            "system (scenario number)",
            "objective value-extensive",
            "objective value-ph",
            "wallclock time-extensive",
            "wallclock time-ph",
            "% similarity",
        ],
    )
    return df
end
"""
    fetch_ph_benchmark_detailed_df(solution_stat::OrderedDict)::AbstractDataFrame

Build a detailed data frame from `run_ph_benchmark` output with one row per
(system, scenario number, run, method) combination, reporting the objective
value, wallclock time, and % similarity (each rounded to 3 digits).
"""
function fetch_ph_benchmark_detailed_df(
    solution_stat::OrderedDict,
)::AbstractDataFrame
    sys_col = String[]
    snum_col = Int[]
    method_col = String[]
    run_col = Int[]
    obj_col = Float64[]
    time_col = Float64[]
    sim_col = Float64[]
    # One row for each method of each run of each (system, snum) pair.
    for (system, system_result) in solution_stat,
        (snum, snum_result) in system_result,
        (run, run_result) in snum_result,
        method in ["extensive form", "progressive hedging"]

        push!(sys_col, system)
        push!(snum_col, snum)
        push!(method_col, method)
        push!(run_col, run)
        push!(obj_col, round(run_result[method]["objective value"], digits = 3))
        push!(
            time_col,
            round(run_result[method]["wallclock time"], digits = 3),
        )
        # % similarity is per-run (method-independent), repeated on both rows.
        push!(sim_col, round(run_result["% similarity"], digits = 3))
    end
    return DataFrame(
        hcat(
            sys_col,
            snum_col,
            method_col,
            run_col,
            obj_col,
            time_col,
            sim_col,
        ),
        [
            "system",
            "number of scenarios",
            "method",
            "run-x",
            "objective value",
            "wallclock time",
            "% similarity",
        ],
    )
end
"""
    run_ph_benchmark(
        cases::AbstractDict;
        date::String = "2017-01-01",
    )::OrderedDict

Benchmark progressive hedging against the extensive form. For every
(system, scenario number, run) combination described by `cases`, generate
randomized scenarios, solve the extensive form in-process, then launch an
MPI job (one process per scenario) running the PH sub-problem script, and
collect both results. Returns a nested `system => snum => run => result`
ordered dictionary.

# Arguments
- `cases`: maps a system name to a dict with keys `"scenario numbers"`
  (vector of scenario counts) and `"number of runs"` (repetitions per count).
- `date`: observation date used when generating scenarios.
"""
function run_ph_benchmark(
    cases::AbstractDict;
    date::String = "2017-01-01",
)::OrderedDict
    mkpath(INPUT_PATH)
    solution_stat = OrderedDict()
    for system in keys(cases)
        solution_stat[system] = OrderedDict()
        for s_num in cases[system]["scenario numbers"]
            solution_stat[system][s_num] = OrderedDict()
            for r in 1:cases[system]["number of runs"]
                solution_stat[system][s_num][r] = OrderedDict()
                # Fresh randomized scenario files for this run.
                scenario_path = _create_new_scenarios(s_num, r, system, date)
                solution_path = "$(TEMPDIR)/output_files/$(system)/snum_$(s_num)/run_$(r)"
                mkpath(solution_path)
                # Solve the extensive form in-process.
                extensive_solution =
                    _solve_instance(glob("*.json", scenario_path))
                # Hand the configuration to the sub-process script, then
                # launch one MPI process per scenario to run PH.
                _write_setup(system, s_num, r)
                mpiexec(
                    exe -> run(`$exe -n $s_num $(Base.julia_cmd()) $FILENAME`),
                )
                # Merge extensive-form and PH outputs into one record.
                result = _retrieve_results(extensive_solution, solution_path)
                solution_stat[system][s_num][r] = result
            end
        end
    end
    return solution_stat
end

@ -0,0 +1,41 @@
using MPI: MPI_Info, push!
using Gurobi, JuMP, MPI, Glob, DelimitedFiles, Printf, UnitCommitment
# Scratch directory, shared with the benchmark driver (ph_benchmark.jl).
const TEMPDIR = tempdir()
# Directory where the driver placed the scenario files and setup.txt.
input_path = "$(TEMPDIR)/input_files/new_scenarios"
"""
    _write_results(final_result, solution_path)::Nothing

Write the final PH result to `solution_path/<rank>/`. Every rank creates
its directory, but only rank 1 writes the objective value, wallclock time,
and binary variable values (the files later read by `_retrieve_results`).
"""
function _write_results(
    final_result::UnitCommitment.PHFinalResult,
    solution_path::String,
)::Nothing
    rank = UnitCommitment.MpiInfo(MPI.COMM_WORLD).rank
    out_dir = "$(solution_path)/$(rank)"
    mkpath(out_dir)
    # Only the first rank persists the result files.
    rank == 1 || return nothing
    write("$(out_dir)/global_obj.txt", string(final_result.obj))
    write("$(out_dir)/wallclock_time.txt", string(final_result.wallclock_time))
    writedlm("$(out_dir)/binary_vals.csv", final_result.vals, ',')
    return nothing
end
# --- Sub-problem driver (launched by `run_ph_benchmark` via mpiexec) ---
# Read the benchmark configuration written by `_write_setup`. Destructuring
# the `open ... do` return value replaces the previous
# `return global system, s_num, r = ...` pattern, which mutated untyped
# global state from inside the closure.
system, s_num, r = open("$(input_path)/setup.txt") do snum
    return split(Base.read(snum, String), ",")
end
new_scenario_path = "$(input_path)/$(system)/snum_$(s_num)/run_$r/"
solution_path = "$(TEMPDIR)/output_files/$(system)/snum_$(s_num)/run_$r"
MPI.Init()
ph = UnitCommitment.ProgressiveHedging()
# Each MPI process reads the scenario files and solves its PH sub-problem.
instance = UnitCommitment.read(glob("*.json", new_scenario_path), ph)
model = UnitCommitment.build_model(
    instance = instance,
    optimizer = Gurobi.Optimizer,
)
final_result = UnitCommitment.optimize!(model, ph)
_write_results(final_result, solution_path)
MPI.Finalize()

@ -58,6 +58,12 @@ Base.@kwdef mutable struct PHSubProblemParams
target::Array{Float64,1} target::Array{Float64,1}
end end
# Final outcome of a progressive-hedging run, returned by
# `optimize!(model, ::ProgressiveHedging)`.
Base.@kwdef mutable struct PHFinalResult
    # Global objective value from the last PH iteration.
    obj::Float64
    # Variable values from the last iteration (`sp_vals`); written out as
    # the binary solution by the benchmark script — TODO confirm semantics.
    vals::Array{Float64,1}
    # Total elapsed wallclock time of the run, in seconds — confirm units.
    wallclock_time::Float64
end
struct MpiInfo struct MpiInfo
comm::Any comm::Any
rank::Int rank::Int

Loading…
Cancel
Save