Compare commits


1 Commits

Author SHA1 Message Date
1afd71b97b Make test/ a standalone project 2023-05-19 15:27:54 -05:00
56 changed files with 184 additions and 4842 deletions

View File

@@ -1,27 +0,0 @@
{
"creators": [
{
"orcid": "0000-0002-5022-9802",
"affiliation": "Argonne National Laboratory",
"name": "Santos Xavier, Alinson"
},
{
"affiliation": "University of Florida",
"name": "Kazachkov, Aleksandr M."
},
{
"affiliation": "Technische Universität Berlin",
"name": "Yurdakul, Ogün"
},
{
"affiliation": "Purdue University",
"name": "He, Jun"
},
{
"affiliation": "Argonne National Laboratory",
"name": "Qiu, Feng"
}
],
"title": "UnitCommitment.jl: A Julia/JuMP Optimization Package for Security-Constrained Unit Commitment",
"description": "<b>UnitCommitment.jl</b> (UC.jl) is an optimization package for the Security-Constrained Unit Commitment Problem (SCUC), a fundamental optimization problem in power systems used, for example, to clear the day-ahead electricity markets. The package provides benchmark instances for the problem and Julia/JuMP implementations of state-of-the-art mixed-integer programming formulations."
}

View File

@@ -13,15 +13,11 @@ JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
[compat]
DataStructures = "0.18"
@@ -29,8 +25,6 @@ Distributions = "0.25"
GZip = "0.5"
JSON = "0.21"
JuMP = "1"
MPI = "0.20"
MathOptInterface = "1"
PackageCompiler = "1"
TimerOutputs = "0.5"
julia = "1"

View File

@@ -1,9 +1,5 @@
[deps]
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
Glob = "c27321d9-0574-5035-807b-f59d2c89b15c"
HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
UnitCommitment = "64606440-39ea-11e9-0f29-3303a1d3d877"

File diff suppressed because it is too large

View File

@@ -1,495 +0,0 @@
{
"Parameters": {
"Version": "0.3",
"Time horizon (h)": 4
},
"Generators": {
"g1": {
"Bus": "b1",
"Production cost curve (MW)": [
100,
110,
130,
135
],
"Production cost curve ($)": [
1400,
1600,
2200,
2400
],
"Startup delays (h)": [
1,
2,
3
],
"Startup costs ($)": [
1000.0,
1500.0,
2000.0
],
"Initial status (h)": -100,
"Initial power (MW)": 0
},
"g2": {
"Bus": "b2",
"Production cost curve (MW)": [
0,
47,
94,
140
],
"Production cost curve ($)": [
0,
2256.00,
4733.37,
7395.39
],
"Startup delays (h)": [
1,
4
],
"Startup costs ($)": [
3000.0,
4000.0
],
"Ramp up limit (MW)": 98.0,
"Ramp down limit (MW)": 98.0,
"Startup limit (MW)": 98.0,
"Shutdown limit (MW)": 98.0,
"Minimum uptime (h)": 4,
"Minimum downtime (h)": 4,
"Maximum daily energy (MWh)": null,
"Maximum daily starts": null,
"Initial status (h)": -8,
"Initial power (MW)": 0,
"Reserve eligibility": [
"r1"
]
},
"g3": {
"Bus": "b3",
"Production cost curve (MW)": [
0,
33,
66,
100
],
"Production cost curve ($)": [
0,
1113.75,
2369.07,
3891.54
],
"Startup delays (h)": [
1,
4,
8
],
"Startup costs ($)": [
1000.0,
2000.0,
3000.0
],
"Ramp up limit (MW)": 70.0,
"Ramp down limit (MW)": 70.0,
"Startup limit (MW)": 70.0,
"Shutdown limit (MW)": 70.0,
"Must run?": true,
"Minimum uptime (h)": 1,
"Minimum downtime (h)": 1,
"Maximum daily energy (MWh)": null,
"Maximum daily starts": null,
"Initial status (h)": -6,
"Initial power (MW)": 0,
"Reserve eligibility": [
"r1"
]
},
"g4": {
"Bus": "b6",
"Production cost curve (MW)": [
33,
66,
100
],
"Production cost curve ($)": [
1113.75,
2369.07,
3891.54
],
"Initial status (h)": -100,
"Initial power (MW)": 0,
"Reserve eligibility": [
"r1"
]
},
"g5": {
"Bus": "b8",
"Production cost curve (MW)": [
33,
66,
100
],
"Production cost curve ($)": [
1113.75,
2369.07,
3891.54
],
"Initial status (h)": -100,
"Initial power (MW)": 0,
"Reserve eligibility": [
"r1"
]
},
"g6": {
"Bus": "b8",
"Production cost curve (MW)": [
100
],
"Production cost curve ($)": [
10000.00
],
"Initial status (h)": -100,
"Initial power (MW)": 0,
"Reserve eligibility": [
"r1"
]
}
},
"Buses": {
"b1": {
"Load (MW)": 0.0
},
"b2": {
"Load (MW)": [
26.01527,
24.46212,
23.29725,
22.90897
]
},
"b3": {
"Load (MW)": [
112.93263,
106.19039,
101.1337,
99.44814
]
},
"b4": {
"Load (MW)": [
57.30552,
53.88429,
51.31838,
50.46307
]
},
"b5": {
"Load (MW)": [
9.11134,
8.56738,
8.15941,
8.02342
]
},
"b6": {
"Load (MW)": [
13.42723,
12.62561,
12.02439,
11.82398
]
},
"b7": {
"Load (MW)": 0.0
},
"b8": {
"Load (MW)": 0.0
},
"b9": {
"Load (MW)": [
35.36638,
33.25495,
31.67138,
31.14353
]
},
"b10": {
"Load (MW)": [
10.78974,
10.14558,
9.66246,
9.50141
]
},
"b11": {
"Load (MW)": [
4.19601,
3.9455,
3.75762,
3.69499
]
},
"b12": {
"Load (MW)": [
7.31305,
6.87645,
6.549,
6.43985
]
},
"b13": {
"Load (MW)": [
16.18461,
15.21837,
14.49368,
14.25212
]
},
"b14": {
"Load (MW)": [
17.86302,
16.79657,
15.99673,
15.73012
]
}
},
"Transmission lines": {
"l1": {
"Source bus": "b1",
"Target bus": "b2",
"Reactance (ohms)": 0.05917000000000001,
"Susceptance (S)": 29.496860773945063,
"Normal flow limit (MW)": 300.0,
"Emergency flow limit (MW)": 400.0,
"Flow limit penalty ($/MW)": 1000.0
},
"l2": {
"Source bus": "b1",
"Target bus": "b5",
"Reactance (ohms)": 0.22304000000000002,
"Susceptance (S)": 7.825184953346168
},
"l3": {
"Source bus": "b2",
"Target bus": "b3",
"Reactance (ohms)": 0.19797,
"Susceptance (S)": 8.816129979261149
},
"l4": {
"Source bus": "b2",
"Target bus": "b4",
"Reactance (ohms)": 0.17632,
"Susceptance (S)": 9.898645939169292
},
"l5": {
"Source bus": "b2",
"Target bus": "b5",
"Reactance (ohms)": 0.17388,
"Susceptance (S)": 10.037550333530765
},
"l6": {
"Source bus": "b3",
"Target bus": "b4",
"Reactance (ohms)": 0.17103,
"Susceptance (S)": 10.204813494675376
},
"l7": {
"Source bus": "b4",
"Target bus": "b5",
"Reactance (ohms)": 0.04211,
"Susceptance (S)": 41.44690695783257
},
"l8": {
"Source bus": "b4",
"Target bus": "b7",
"Reactance (ohms)": 0.20911999999999997,
"Susceptance (S)": 8.346065665619404
},
"l9": {
"Source bus": "b4",
"Target bus": "b9",
"Reactance (ohms)": 0.55618,
"Susceptance (S)": 3.1380654680037567
},
"l10": {
"Source bus": "b5",
"Target bus": "b6",
"Reactance (ohms)": 0.25201999999999997,
"Susceptance (S)": 6.92536009838239
},
"l11": {
"Source bus": "b6",
"Target bus": "b11",
"Reactance (ohms)": 0.1989,
"Susceptance (S)": 8.774908255376218
},
"l12": {
"Source bus": "b6",
"Target bus": "b12",
"Reactance (ohms)": 0.25581,
"Susceptance (S)": 6.8227561549365925
},
"l13": {
"Source bus": "b6",
"Target bus": "b13",
"Reactance (ohms)": 0.13027,
"Susceptance (S)": 13.397783465067395
},
"l14": {
"Source bus": "b7",
"Target bus": "b8",
"Reactance (ohms)": 0.17615,
"Susceptance (S)": 9.908198989465395
},
"l15": {
"Source bus": "b7",
"Target bus": "b9",
"Reactance (ohms)": 0.11001,
"Susceptance (S)": 15.865187273832648
},
"l16": {
"Source bus": "b9",
"Target bus": "b10",
"Reactance (ohms)": 0.0845,
"Susceptance (S)": 20.65478404727017
},
"l17": {
"Source bus": "b9",
"Target bus": "b14",
"Reactance (ohms)": 0.27038,
"Susceptance (S)": 6.4550974628091184
},
"l18": {
"Source bus": "b10",
"Target bus": "b11",
"Reactance (ohms)": 0.19207,
"Susceptance (S)": 9.08694357262628
},
"l19": {
"Source bus": "b12",
"Target bus": "b13",
"Reactance (ohms)": 0.19988,
"Susceptance (S)": 8.73188539120637
},
"l20": {
"Source bus": "b13",
"Target bus": "b14",
"Reactance (ohms)": 0.34802,
"Susceptance (S)": 5.0150257226433235
}
},
"Contingencies": {
"c1": {
"Affected lines": [
"l1"
]
},
"c2": {
"Affected lines": [
"l2"
]
},
"c3": {
"Affected lines": [
"l3"
]
},
"c4": {
"Affected lines": [
"l4"
]
},
"c5": {
"Affected lines": [
"l5"
]
},
"c6": {
"Affected lines": [
"l6"
]
},
"c7": {
"Affected lines": [
"l7"
]
},
"c8": {
"Affected lines": [
"l8"
]
},
"c9": {
"Affected lines": [
"l9"
]
},
"c10": {
"Affected lines": [
"l10"
]
},
"c11": {
"Affected lines": [
"l11"
]
},
"c12": {
"Affected lines": [
"l12"
]
},
"c13": {
"Affected lines": [
"l13"
]
},
"c15": {
"Affected lines": [
"l15"
]
},
"c16": {
"Affected lines": [
"l16"
]
},
"c17": {
"Affected lines": [
"l17"
]
},
"c18": {
"Affected lines": [
"l18"
]
},
"c19": {
"Affected lines": [
"l19"
]
},
"c20": {
"Affected lines": [
"l20"
]
}
},
"Price-sensitive loads": {
"ps1": {
"Bus": "b3",
"Revenue ($/MW)": 100.0,
"Demand (MW)": 50.0
}
},
"Reserves": {
"r1": {
"Type": "Spinning",
"Amount (MW)": 100.0,
"Shortfall penalty ($/MW)": 1000.0
}
}
}

View File

@@ -1,18 +1,16 @@
using Documenter, UnitCommitment, JuMP
function make()
makedocs(
sitename="UnitCommitment.jl",
pages=[
"Home" => "index.md",
"usage.md",
"format.md",
"instances.md",
"model.md",
"api.md",
],
format = Documenter.HTML(
assets=["assets/custom.css"],
)
makedocs(
sitename="UnitCommitment.jl",
pages=[
"Home" => "index.md",
"usage.md",
"format.md",
"instances.md",
"model.md",
"api.md",
],
format = Documenter.HTML(
assets=["assets/custom.css"],
)
end
)

View File

@@ -4,12 +4,11 @@ Data Format
Input Data Format
-----------------
An instance of the stochastic security-constrained unit commitment (SCUC) problem is composed of multiple scenarios. Each scenario should be described in an individual JSON file containing the main sections below. For deterministic instances, a single scenario file, following the same format, may also be provided. Fields that are allowed to differ among scenarios are marked as "uncertain". Fields that are allowed to be time-dependent are marked as "time series".
Instances are specified by JSON files containing the following main sections:
* [Parameters](#Parameters)
* [Buses](#Buses)
* [Generators](#Generators)
* [Storage units](#Storage-units)
* [Price-sensitive loads](#Price-sensitive-loads)
* [Transmission lines](#Transmission-lines)
* [Reserves](#Reserves)
@@ -21,14 +20,12 @@ Each section is described in detail below. See [case118/2017-01-01.json.gz](http
This section describes system-wide parameters, such as the power balance penalty, and optimization parameters, such as the length of the planning horizon and the time step.
| Key | Description | Default | Time series? | Uncertain?
| :----------------------------- | :------------------------------------------------ | :------: | :------------:| :----------:
| `Version` | Version of UnitCommitment.jl this file was written for. Required to ensure that the file remains readable in future versions of the package. If you are following this page to construct the file, this field should equal `0.4`. | Required | No | No
| `Time horizon (min)` or `Time horizon (h)` | Length of the planning horizon (in minutes or hours). Either `Time horizon (min)` or `Time horizon (h)` is required, but not both. | Required | No | No
| `Time step (min)` | Length of each time step (in minutes). Must be a divisor of 60 (e.g. 60, 30, 20, 15, etc). | `60` | No | No
| `Power balance penalty ($/MW)` | Penalty for system-wide shortage or surplus in production (in $/MW). This is charged per time step. For example, if there is a shortage of 1 MW for three time steps, three times this amount will be charged. | `1000.0` | No | Yes
| `Scenario name` | Name of the scenario. | `"s1"` | No | ---
| `Scenario weight` | Weight of the scenario. The scenario weight can be any positive real number, that is, it does not have to be between zero and one. The package normalizes the weights to ensure that the probabilities of all scenarios sum to one. | 1.0 | No | ---
| Key | Description | Default | Time series?
| :----------------------------- | :------------------------------------------------ | :------: | :------------:
| `Version` | Version of UnitCommitment.jl this file was written for. Required to ensure that the file remains readable in future versions of the package. If you are following this page to construct the file, this field should equal `0.3`. | Required | N
| `Time horizon (h)` | Length of the planning horizon (in hours). | Required | N
| `Time step (min)` | Length of each time step (in minutes). Must be a divisor of 60 (e.g. 60, 30, 20, 15, etc). | `60` | N
| `Power balance penalty ($/MW)` | Penalty for system-wide shortage or surplus in production (in $/MW). This is charged per time step. For example, if there is a shortage of 1 MW for three time steps, three times this amount will be charged. | `1000.0` | Y
#### Example
@@ -37,9 +34,7 @@ This section describes system-wide parameters, such as power balance penalty, an
"Parameters": {
"Version": "0.3",
"Time horizon (h)": 4,
"Power balance penalty ($/MW)": 1000.0,
"Scenario name": "s1",
"Scenario weight": 0.5
"Power balance penalty ($/MW)": 1000.0
}
}
```
@@ -48,9 +43,9 @@ This section describes system-wide parameters, such as power balance penalty, an
This section describes the characteristics of each bus in the system.
| Key | Description | Default | Time series? | Uncertain?
| :----------------- | :------------------------------------------------------------ | ------- | :-----------: | :---:
| `Load (MW)` | Fixed load connected to the bus (in MW). | Required | Yes | Yes
| Key | Description | Default | Time series?
| :----------------- | :------------------------------------------------------------ | ------- | :-------------:
| `Load (MW)` | Fixed load connected to the bus (in MW). | Required | Y
#### Example
@@ -82,33 +77,33 @@ This section describes all generators in the system. Two types of units can be s
#### Thermal Units
| Key | Description | Default | Time series? | Uncertain?
| :------------------------ | :------------------------------------------------| ------- | :-----------: | :---:
| `Bus` | Identifier of the bus where this generator is located (string). | Required | No | Yes
| `Type` | Type of the generator (string). For thermal generators, this must be `Thermal`. | Required | No | No
| `Production cost curve (MW)` and `Production cost curve ($)` | Parameters describing the piecewise-linear production costs. See below for more details. | Required | Yes | Yes
| `Startup costs ($)` and `Startup delays (h)` | Parameters describing how much it costs to start the generator after it has been shut down for a certain amount of time. If `Startup costs ($)` and `Startup delays (h)` are set to `[300.0, 400.0]` and `[1, 4]`, for example, and the generator is shut down at time `00:00` (h:min), then it costs \$300 to start up the generator at any time between `01:00` and `03:59`, and \$400 to start the generator at time `04:00` or any time after that. The number of startup cost points is unlimited, and may be different for each generator. Startup delays must be strictly increasing and the first entry must equal `Minimum downtime (h)`. | `[0.0]` and `[1]` | No | Yes
| `Minimum uptime (h)` | Minimum amount of time the generator must stay operational after starting up (in hours). For example, if the generator starts up at time `00:00` (h:min) and `Minimum uptime (h)` is set to 4, then the generator can only shut down at time `04:00`. | `1` | No | Yes
| `Minimum downtime (h)` | Minimum amount of time the generator must stay offline after shutting down (in hours). For example, if the generator shuts down at time `00:00` (h:min) and `Minimum downtime (h)` is set to 4, then the generator can only start producing power again at time `04:00`. | `1` | No | Yes
| `Ramp up limit (MW)` | Maximum increase in production from one time step to the next (in MW). For example, if the generator is producing 100 MW at time step 1 and if this parameter is set to 40 MW, then the generator will produce at most 140 MW at time step 2. | `+inf` | No | Yes
| `Ramp down limit (MW)` | Maximum decrease in production from one time step to the next (in MW). For example, if the generator is producing 100 MW at time step 1 and this parameter is set to 40 MW, then the generator will produce at least 60 MW at time step 2. | `+inf` | No | Yes
| `Startup limit (MW)` | Maximum amount of power a generator can produce immediately after starting up (in MW). For example, if `Startup limit (MW)` is set to 100 MW and the unit is off at time step 1, then it may produce at most 100 MW at time step 2.| `+inf` | No | Yes
| `Shutdown limit (MW)` | Maximum amount of power a generator can produce immediately before shutting down (in MW). Specifically, the generator can only shut down at time step `t+1` if its production at time step `t` is below this limit. | `+inf` | No | Yes
| `Initial status (h)` | If set to a positive number, indicates the amount of time (in hours) the generator has been on at the beginning of the simulation, and if set to a negative number, the amount of time the generator has been off. For example, if `Initial status (h)` is `-2`, this means that the generator was off since `-02:00` (h:min). The simulation starts at time `00:00`. If `Initial status (h)` is `3`, this means that the generator was on since `-03:00`. A value of zero is not acceptable. | Required | No | No
| `Initial power (MW)` | Amount of power the generator was producing at time step `-1`, immediately before the planning horizon starts. | Required | No | No
| `Must run?` | If `true`, the generator should be committed, even if that is not economical (Boolean). | `false` | Yes | Yes
| `Reserve eligibility` | List of reserve products this generator is eligible to provide. By default, the generator is not eligible to provide any reserves. | `[]` | No | Yes
| `Commitment status` | List of commitment statuses over the time horizon. At time `t`, if `true`, the generator must be committed at that time period; if `false`, the generator must not be committed at that time period. If `null` at time `t`, the generator's commitment status is then decided by the model. By default, the status is a list of `null` values. | `null` | Yes | Yes
| Key | Description | Default | Time series?
| :------------------------ | :------------------------------------------------| ------- | :-----------:
| `Bus` | Identifier of the bus where this generator is located (string). | Required | N
| `Type` | Type of the generator (string). For thermal generators, this must be `Thermal`. | Required | N
| `Production cost curve (MW)` and `Production cost curve ($)` | Parameters describing the piecewise-linear production costs. See below for more details. | Required | Y
| `Startup costs ($)` and `Startup delays (h)` | Parameters describing how much it costs to start the generator after it has been shut down for a certain amount of time. If `Startup costs ($)` and `Startup delays (h)` are set to `[300.0, 400.0]` and `[1, 4]`, for example, and the generator is shut down at time `00:00` (h:min), then it costs \$300 to start up the generator at any time between `01:00` and `03:59`, and \$400 to start the generator at time `04:00` or any time after that. The number of startup cost points is unlimited, and may be different for each generator. Startup delays must be strictly increasing and the first entry must equal `Minimum downtime (h)`. | `[0.0]` and `[1]` | N
| `Minimum uptime (h)` | Minimum amount of time the generator must stay operational after starting up (in hours). For example, if the generator starts up at time `00:00` (h:min) and `Minimum uptime (h)` is set to 4, then the generator can only shut down at time `04:00`. | `1` | N
| `Minimum downtime (h)` | Minimum amount of time the generator must stay offline after shutting down (in hours). For example, if the generator shuts down at time `00:00` (h:min) and `Minimum downtime (h)` is set to 4, then the generator can only start producing power again at time `04:00`. | `1` | N
| `Ramp up limit (MW)` | Maximum increase in production from one time step to the next (in MW). For example, if the generator is producing 100 MW at time step 1 and if this parameter is set to 40 MW, then the generator will produce at most 140 MW at time step 2. | `+inf` | N
| `Ramp down limit (MW)` | Maximum decrease in production from one time step to the next (in MW). For example, if the generator is producing 100 MW at time step 1 and this parameter is set to 40 MW, then the generator will produce at least 60 MW at time step 2. | `+inf` | N
| `Startup limit (MW)` | Maximum amount of power a generator can produce immediately after starting up (in MW). For example, if `Startup limit (MW)` is set to 100 MW and the unit is off at time step 1, then it may produce at most 100 MW at time step 2.| `+inf` | N
| `Shutdown limit (MW)` | Maximum amount of power a generator can produce immediately before shutting down (in MW). Specifically, the generator can only shut down at time step `t+1` if its production at time step `t` is below this limit. | `+inf` | N
| `Initial status (h)` | If set to a positive number, indicates the amount of time (in hours) the generator has been on at the beginning of the simulation, and if set to a negative number, the amount of time the generator has been off. For example, if `Initial status (h)` is `-2`, this means that the generator was off since `-02:00` (h:min). The simulation starts at time `00:00`. If `Initial status (h)` is `3`, this means that the generator was on since `-03:00`. A value of zero is not acceptable. | Required | N
| `Initial power (MW)` | Amount of power the generator was producing at time step `-1`, immediately before the planning horizon starts. | Required | N
| `Must run?` | If `true`, the generator should be committed, even if that is not economical (Boolean). | `false` | Y
| `Reserve eligibility` | List of reserve products this generator is eligible to provide. By default, the generator is not eligible to provide any reserves. | `[]` | N
| `Commitment status` | List of commitment statuses over the time horizon. At time `t`, if `true`, the generator must be committed at that time period; if `false`, the generator must not be committed at that time period. If `null` at time `t`, the generator's commitment status is then decided by the model. By default, the status is a list of `null` values. | `null` | Y
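To make the startup-cost rule in the table above concrete, here is a small standalone Julia sketch (not part of the package; the delay and cost values follow the `[300.0, 400.0]` / `[1, 4]` example given for `Startup costs ($)` and `Startup delays (h)`):
```julia
# Pick the startup-cost category for a unit that has been offline for
# `hours_off` hours, given its startup delays and costs.
startup_delays = [1, 4]          # hours offline before each category becomes active
startup_costs  = [300.0, 400.0]  # cost of each category ($)
startup_cost(hours_off) = startup_costs[findlast(d -> hours_off >= d, startup_delays)]
@assert startup_cost(2) == 300.0   # offline for 1-3 h: first category applies
@assert startup_cost(5) == 400.0   # offline for 4 h or more: second category applies
```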
#### Profiled Units
| Key | Description | Default | Time series? | Uncertain?
| :---------------- | :------------------------------------------------ | :------: | :------------: | :---:
| `Bus` | Identifier of the bus where this generator is located (string). | Required | No | Yes
| `Type` | Type of the generator (string). For profiled generators, this must be `Profiled`. | Required | No | No
| `Cost ($/MW)` | Cost incurred for serving each MW of power by this generator. | Required | Yes | Yes
| `Minimum power (MW)` | Minimum amount of power this generator may supply. | `0.0` | Yes | Yes
| `Maximum power (MW)` | Maximum amount of power this generator may supply. | Required | Yes | Yes
| Key | Description | Default | Time series?
| :---------------- | :------------------------------------------------ | :------: | :------------:
| `Bus` | Identifier of the bus where this generator is located (string). | Required | N
| `Type` | Type of the generator (string). For profiled generators, this must be `Profiled`. | Required | N
| `Cost ($/MW)` | Cost incurred for serving each MW of power by this generator. | Required | Y
| `Minimum power (MW)` | Minimum amount of power this generator may supply. | `0.0` | Y
| `Maximum power (MW)` | Maximum amount of power this generator may supply. | Required | Y
#### Production costs and limits
@@ -174,90 +169,15 @@ Note that this curve also specifies the production limits. Specifically, the fir
}
```
### Storage units
This section describes energy storage units in the system which charge and discharge power. The storage units consume power while charging, and generate power while discharging.
| Key | Description | Default | Time series? | Uncertain?
| :---------------- | :------------------------------------------------ | :------: | :------------: | :----:
| `Bus` | Bus where the storage unit is located. Multiple storage units may be placed at the same bus. | Required | No | Yes
| `Minimum level (MWh)` | Minimum energy level this storage unit may contain. | `0.0` | Yes | Yes
| `Maximum level (MWh)` | Maximum energy level this storage unit may contain. | Required | Yes | Yes
| `Allow simultaneous charging and discharging` | If `false`, the storage unit is not allowed to charge and discharge at the same time (Boolean). | `true` | Yes | Yes
| `Charge cost ($/MW)` | Cost incurred for charging each MW of power into this storage unit. | Required | Yes | Yes
| `Discharge cost ($/MW)` | Cost incurred for discharging each MW of power from this storage unit. | Required | Yes | Yes
| `Charge efficiency` | Efficiency rate to charge power into this storage unit. This value must be greater than or equal to `0.0`, and less than or equal to `1.0`. | `1.0` | Yes | Yes
| `Discharge efficiency` | Efficiency rate to discharge power from this storage unit. This value must be greater than or equal to `0.0`, and less than or equal to `1.0`. | `1.0` | Yes | Yes
| `Loss factor` | The energy dissipation rate of this storage unit. This value must be greater than or equal to `0.0`, and less than or equal to `1.0`. | `0.0` | Yes | Yes
| `Minimum charge rate (MW)` | Minimum amount of power rate this storage unit may charge. | `0.0` | Yes | Yes
| `Maximum charge rate (MW)` | Maximum amount of power rate this storage unit may charge. | Required | Yes | Yes
| `Minimum discharge rate (MW)` | Minimum amount of power rate this storage unit may discharge. | `0.0` | Yes | Yes
| `Maximum discharge rate (MW)` | Maximum amount of power rate this storage unit may discharge. | Required | Yes | Yes
| `Initial level (MWh)` | Amount of energy stored in this storage unit at time step `-1`, immediately before the planning horizon starts. | `0.0` | No | Yes
| `Last period minimum level (MWh)` | Minimum energy level this storage unit may contain in the last time step. By default, this value is the same as the last value of `Minimum level (MWh)`. | `Minimum level (MWh)` | No | Yes
| `Last period maximum level (MWh)` | Maximum energy level this storage unit may contain in the last time step. By default, this value is the same as the last value of `Maximum level (MWh)`. | `Maximum level (MWh)` | No | Yes
#### Example
```json
{
"Storage units": {
"su1": {
"Bus": "b2",
"Maximum level (MWh)": 100.0,
"Charge cost ($/MW)": 2.0,
"Discharge cost ($/MW)": 2.5,
"Maximum charge rate (MW)": 10.0,
"Maximum discharge rate (MW)": 8.0
},
"su2": {
"Bus": "b2",
"Minimum level (MWh)": 10.0,
"Maximum level (MWh)": 100.0,
"Allow simultaneous charging and discharging": false,
"Charge cost ($/MW)": 3.0,
"Discharge cost ($/MW)": 3.5,
"Charge efficiency": 0.8,
"Discharge efficiency": 0.85,
"Loss factor": 0.01,
"Minimum charge rate (MW)": 5.0,
"Maximum charge rate (MW)": 10.0,
"Minimum discharge rate (MW)": 2.0,
"Maximum discharge rate (MW)": 10.0,
"Initial level (MWh)": 70.0,
"Last period minimum level (MWh)": 80.0,
"Last period maximum level (MWh)": 85.0
},
"su3": {
"Bus": "b9",
"Minimum level (MWh)": [10.0, 11.0, 12.0, 13.0],
"Maximum level (MWh)": [100.0, 110.0, 120.0, 130.0],
"Allow simultaneous charging and discharging": [false, false, true, true],
"Charge cost ($/MW)": [2.0, 2.1, 2.2, 2.3],
"Discharge cost ($/MW)": [1.0, 1.1, 1.2, 1.3],
"Charge efficiency": [0.8, 0.81, 0.82, 0.82],
"Discharge efficiency": [0.85, 0.86, 0.87, 0.88],
"Loss factor": [0.01, 0.01, 0.02, 0.02],
"Minimum charge rate (MW)": [5.0, 5.1, 5.2, 5.3],
"Maximum charge rate (MW)": [10.0, 10.1, 10.2, 10.3],
"Minimum discharge rate (MW)": [4.0, 4.1, 4.2, 4.3],
"Maximum discharge rate (MW)": [8.0, 8.1, 8.2, 8.3],
"Initial level (MWh)": 20.0,
"Last period minimum level (MWh)": 21.0,
"Last period maximum level (MWh)": 22.0
}
}
}
```
### Price-sensitive loads
This section describes components in the system which may increase or reduce their energy consumption according to the energy prices. Fixed loads (as described in the `buses` section) are always served, regardless of the price, unless there is significant congestion in the system or insufficient production capacity. Price-sensitive loads, on the other hand, are only served if it is economical to do so.
| Key | Description | Default | Time series? | Uncertain?
| :---------------- | :------------------------------------------------ | :------: | :------------: | :----:
| `Bus` | Bus where the load is located. Multiple price-sensitive loads may be placed at the same bus. | Required | No | Yes
| `Revenue ($/MW)` | Revenue obtained for serving each MW of power to this load. | Required | Yes | Yes
| `Demand (MW)` | Maximum amount of power required by this load. Any amount lower than this may be served. | Required | Yes | Yes
| Key | Description | Default | Time series?
| :---------------- | :------------------------------------------------ | :------: | :------------:
| `Bus` | Bus where the load is located. Multiple price-sensitive loads may be placed at the same bus. | Required | N
| `Revenue ($/MW)` | Revenue obtained for serving each MW of power to this load. | Required | Y
| `Demand (MW)` | Maximum amount of power required by this load. Any amount lower than this may be served. | Required | Y
#### Example
@@ -277,14 +197,15 @@ This section describes components in the system which may increase or reduce the
This section describes the characteristics of the transmission system, such as its topology and the susceptance of each transmission line.
| Key | Description | Default | Time series? | Uncertain?
| :--------------------- | :----------------------------------------------- | ------- | :------------: | :---:
| `Source bus` | Identifier of the bus where the transmission line originates. | Required | No | Yes
| `Target bus` | Identifier of the bus where the transmission line reaches. | Required | No | Yes
| `Susceptance (S)` | Susceptance of the transmission line (in siemens). | Required | No | Yes
| `Normal flow limit (MW)` | Maximum amount of power (in MW) allowed to flow through the line when the system is in its regular, fully-operational state. | `+inf` | Yes | Yes
| `Emergency flow limit (MW)` | Maximum amount of power (in MW) allowed to flow through the line when the system is in degraded state (for example, after the failure of another transmission line). | `+inf` | Yes | Yes
| `Flow limit penalty ($/MW)` | Penalty for violating the flow limits of the transmission line (in $/MW). This is charged per time step. For example, if there is a thermal violation of 1 MW for three time steps, then three times this amount will be charged. | `5000.0` | Yes | Yes
| Key | Description | Default | Time series?
| :--------------------- | :----------------------------------------------- | ------- | :------------:
| `Source bus` | Identifier of the bus where the transmission line originates. | Required | N
| `Target bus` | Identifier of the bus where the transmission line reaches. | Required | N
| `Reactance (ohms)` | Reactance of the transmission line (in ohms). | Required | N
| `Susceptance (S)` | Susceptance of the transmission line (in siemens). | Required | N
| `Normal flow limit (MW)` | Maximum amount of power (in MW) allowed to flow through the line when the system is in its regular, fully-operational state. | `+inf` | Y
| `Emergency flow limit (MW)` | Maximum amount of power (in MW) allowed to flow through the line when the system is in degraded state (for example, after the failure of another transmission line). | `+inf` | Y
| `Flow limit penalty ($/MW)` | Penalty for violating the flow limits of the transmission line (in $/MW). This is charged per time step. For example, if there is a thermal violation of 1 MW for three time steps, then three times this amount will be charged. | `5000.0` | Y
#### Example
@@ -294,6 +215,7 @@ This section describes the characteristics of transmission system, such as its t
"l1": {
"Source bus": "b1",
"Target bus": "b2",
"Reactance (ohms)": 0.05917,
"Susceptance (S)": 29.49686,
"Normal flow limit (MW)": 15000.0,
"Emergency flow limit (MW)": 20000.0,
@@ -309,11 +231,11 @@ This section describes the characteristics of transmission system, such as its t
This section describes the hourly amount of reserves required.
| Key | Description | Default | Time series? | Uncertain?
| :-------------------- | :------------------------------------------------- | --------- | :----: | :---:
| `Type` | Type of reserve product. Must be either "spinning" or "flexiramp". | Required | No | No
| `Amount (MW)` | Amount of reserves required. | Required | Yes | Yes
| `Shortfall penalty ($/MW)` | Penalty for shortage in meeting the reserve requirements (in $/MW). This is charged per time step. Negative value implies reserve constraints must always be satisfied. | `-1` | Yes | Yes
| Key | Description | Default | Time series?
| :-------------------- | :------------------------------------------------- | --------- | :----:
| `Type` | Type of reserve product. Must be either "spinning" or "flexiramp". | Required | N
| `Amount (MW)` | Amount of reserves required. | Required | Y
| `Shortfall penalty ($/MW)` | Penalty for shortage in meeting the reserve requirements (in $/MW). This is charged per time step. Negative value implies reserve constraints must always be satisfied. | `-1` | Y
#### Example 1
@@ -347,10 +269,10 @@ This section describes the hourly amount of reserves required.
This section describes credible contingency scenarios in the optimization, such as the loss of a transmission line or generator.
| Key | Description | Default | Uncertain?
| :-------------------- | :----------------------------------------------- | :--------: | :---:
| `Affected generators` | List of generators affected by this contingency. May be omitted if no generators are affected. | `[]` | Yes
| `Affected lines` | List of transmission lines affected by this contingency. May be omitted if no lines are affected. | `[]` | Yes
| Key | Description | Default
| :-------------------- | :----------------------------------------------- | ----------
| `Affected generators` | List of generators affected by this contingency. May be omitted if no generators are affected. | `[]`
| `Affected lines` | List of transmission lines affected by this contingency. May be omitted if no lines are affected. | `[]`
#### Example
@@ -401,8 +323,8 @@ The output data format is also JSON-based, but it is not currently documented si
Current limitations
-------------------
* Network topology must remain the same for all time periods.
* Network topology remains the same for all time periods.
* Only N-1 transmission contingencies are supported. Generator contingencies are not currently supported.
* Time-varying minimum production amounts are not currently compatible with ramp/startup/shutdown limits.
* Flexible ramping products can only be acquired under the `WanHob2016` formulation, which does not support spinning reserves.
* The set of generators must be the same in all scenarios.

View File

@@ -1,12 +1,12 @@
# UnitCommitment.jl
**UnitCommitment.jl** (UC.jl) is an optimization package for the Security-Constrained Unit Commitment Problem (SCUC), a fundamental optimization problem in power systems used, for example, to clear the day-ahead electricity markets. Both deterministic and two-stage stochastic versions of the problem are supported. The package provides benchmark instances for the problem, a flexible and well-documented data format for the problem, as well as Julia/JuMP implementations of state-of-the-art mixed-integer programming formulations and solution methods.
**UnitCommitment.jl** (UC.jl) is a Julia/JuMP optimization package for the Security-Constrained Unit Commitment Problem (SCUC), a fundamental optimization problem in power systems used, for example, to clear the day-ahead electricity markets. The package provides benchmark instances for the problem and Julia/JuMP implementations of state-of-the-art mixed-integer programming formulations.
## Package Components
* **Data Format:** The package proposes an extensible and fully-documented JSON-based data specification format for SCUC, developed in collaboration with Independent System Operators (ISOs), which describes the most important aspects of the problem. The format supports all the most common thermal generator characteristics (including ramping, piecewise-linear production cost curves and time-dependent startup costs), as well as profiled generators, reserves, price-sensitive loads, battery storage, transmission, and contingencies.
* **Data Format:** The package proposes an extensible and fully-documented JSON-based data specification format for SCUC, developed in collaboration with Independent System Operators (ISOs), which describes the most important aspects of the problem. The format supports all the most common generator characteristics (including ramping, piecewise-linear production cost curves and time-dependent startup costs), as well as operating reserves, price-sensitive loads, transmission networks and contingencies.
* **Benchmark Instances:** The package provides a diverse collection of large-scale benchmark instances collected from the literature, converted into a common data format, and extended using data-driven methods to make them more challenging and realistic.
* **Model Implementation**: The package provides Julia/JuMP implementations of state-of-the-art formulations and solution methods for the deterministic and stochastic SCUC, including multiple ramping formulations ([ArrCon2000](https://doi.org/10.1109/59.871739), [MorLatRam2013](https://doi.org/10.1109/TPWRS.2013.2251373), [DamKucRajAta2016](https://doi.org/10.1007/s10107-015-0919-9), [PanGua2016](https://doi.org/10.1287/opre.2016.1520)), piecewise-linear cost formulations ([Gar1962](https://doi.org/10.1109/AIEEPAS.1962.4501405), [CarArr2006](https://doi.org/10.1109/TPWRS.2006.876672), [KnuOstWat2018](https://doi.org/10.1109/TPWRS.2017.2783850)), contingency screening methods ([XavQiuWanThi2019](https://doi.org/10.1109/TPWRS.2019.2892620)) and decomposition methods. Our goal is to keep these implementations up-to-date as new methods are proposed in the literature.
* **Model Implementation**: The package provides Julia/JuMP implementations of state-of-the-art formulations and solution methods for SCUC, including multiple ramping formulations ([ArrCon2000](https://doi.org/10.1109/59.871739), [MorLatRam2013](https://doi.org/10.1109/TPWRS.2013.2251373), [DamKucRajAta2016](https://doi.org/10.1007/s10107-015-0919-9), [PanGua2016](https://doi.org/10.1287/opre.2016.1520)), multiple piecewise-linear cost formulations ([Gar1962](https://doi.org/10.1109/AIEEPAS.1962.4501405), [CarArr2006](https://doi.org/10.1109/TPWRS.2006.876672), [KnuOstWat2018](https://doi.org/10.1109/TPWRS.2017.2783850)) and contingency screening methods ([XavQiuWanThi2019](https://doi.org/10.1109/TPWRS.2019.2892620)). Our goal is to keep these implementations up-to-date as new methods are proposed in the literature.
* **Benchmark Tools:** The package provides automated benchmark scripts to accurately evaluate the performance impact of proposed code changes.
## Table of Contents
@@ -35,7 +35,7 @@ Depth = 3
If you use UnitCommitment.jl in your research (instances, models or algorithms), we kindly request that you cite the package as follows:
* **Alinson S. Xavier, Aleksandr M. Kazachkov, Ogün Yurdakul, Jun He, Feng Qiu**, "UnitCommitment.jl: A Julia/JuMP Optimization Package for Security-Constrained Unit Commitment (Version 0.4)". Zenodo (2023). [DOI: 10.5281/zenodo.4269874](https://doi.org/10.5281/zenodo.4269874).
* **Alinson S. Xavier, Aleksandr M. Kazachkov, Ogün Yurdakul, Feng Qiu**, "UnitCommitment.jl: A Julia/JuMP Optimization Package for Security-Constrained Unit Commitment (Version 0.3)". Zenodo (2022). [DOI: 10.5281/zenodo.4269874](https://doi.org/10.5281/zenodo.4269874).
If you use the instances, we additionally request that you cite the original sources, as described in the [instances page](instances.md).
@@ -43,7 +43,7 @@ If you use the instances, we additionally request that you cite the original sou
```text
UnitCommitment.jl: A Julia/JuMP Optimization Package for Security-Constrained Unit Commitment
Copyright © 2020-2023, UChicago Argonne, LLC. All Rights Reserved.
Copyright © 2020-2022, UChicago Argonne, LLC. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:

View File

@@ -1,69 +1,57 @@
JuMP Model
==========
In this page, we describe the JuMP optimization model produced by the function `build_model`. A detailed understanding of this model is not necessary if you are just interested in using the package to solve some standard unit commitment cases, but it may be useful, for example, if you need to solve a slightly different problem, with additional variables and constraints. The notation in this page generally follows [KnOsWa20].
In this page, we describe the JuMP optimization model produced by the function `UnitCommitment.build_model`. A detailed understanding of this model is not necessary if you are just interested in using the package to solve some standard unit commitment cases, but it may be useful, for example, if you need to solve a slightly different problem, with additional variables and constraints. The notation in this page generally follows [KnOsWa20].
Decision variables
------------------
UC.jl models the security-constrained unit commitment problem as a two-stage stochastic program. In this approach, some of the decision variables are *first-stage decisions*, which are taken before the uncertainty is realized and must therefore be the same across all scenarios, while the remaining variables are *second-stage decisions*, which can attain different values in each scenario. In the current version of the package, all binary variables (which model commitment decisions of thermal units) are first-stage decisions and all continuous variables are second-stage decisions.
!!! note
UC.jl treats deterministic SCUC instances as a special case of the stochastic problem in which there is only one scenario, named `"s1"` by default. To access second-stage decisions, therefore, you must provide this scenario name as the value for `sn`. For example, `model[:prod_above]["s1", g, t]`.
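The sketch below (hypothetical generator name `"g1"`; it assumes `model` was created by `UnitCommitment.build_model` and has already been optimized) illustrates how the two kinds of variables are accessed:
```julia
using JuMP
# First-stage (commitment) variables are indexed without a scenario:
u = value(model[:is_on]["g1", 1])
# Second-stage (dispatch) variables always carry the scenario name; for a
# deterministic instance, the single default scenario is "s1":
p = value(model[:prod_above]["s1", "g1", 1])
```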
### Generators
In this section, we describe the decision variables associated with the generators, which include both thermal units (e.g., natural gas-fired power plant) and profiled units (e.g., wind turbine).
#### Thermal Units
Name | Description | Unit | Stage
:-----|:-------------|:------: | :------:
`is_on[g,t]` | True if generator `g` is on at time `t`. | Binary | 1
`switch_on[g,t]` | True if generator `g` switches on at time `t`. | Binary | 1
`switch_off[g,t]` | True if generator `g` switches off at time `t`. | Binary | 1
`startup[g,t,s]` | True if generator `g` switches on at time `t` incurring start-up costs from start-up category `s`. | Binary | 1
`prod_above[sn,g,t]` | Amount of power produced by generator `g` above its minimum power output at time `t` in scenario `sn`. For example, if the minimum power of generator `g` is 100 MW and `g` is producing 115 MW of power at time `t` in scenario `sn`, then `prod_above[sn,g,t]` equals `15.0`. | MW | 2
`segprod[sn,g,t,k]` | Amount of power from piecewise linear segment `k` produced by generator `g` at time `t` in scenario `sn`. For example, if cost curve for generator `g` is defined by the points `(100, 1400)`, `(110, 1600)`, `(130, 2200)` and `(135, 2400)`, and if the generator is producing 115 MW of power at time `t` in scenario `sn`, then `segprod[sn,g,t,:]` equals `[10.0, 5.0, 0.0]`.| MW | 2
`reserve[sn,r,g,t]` | Amount of reserve `r` provided by unit `g` at time `t` in scenario `sn`. | MW | 2
Name | Symbol | Description | Unit
:-----|:--------:|:-------------|:------:
`is_on[g,t]` | $u_{g}(t)$ | True if generator `g` is on at time `t`. | Binary
`switch_on[g,t]` | $v_{g}(t)$ | True if generator `g` switches on at time `t`. | Binary
`switch_off[g,t]` | $w_{g}(t)$ | True if generator `g` switches off at time `t`. | Binary
`prod_above[g,t]` |$p'_{g}(t)$ | Amount of power produced by generator `g` above its minimum power output at time `t`. For example, if the minimum power of generator `g` is 100 MW and `g` is producing 115 MW of power at time `t`, then `prod_above[g,t]` equals `15.0`. | MW
`segprod[g,t,k]` | $p^k_g(t)$ | Amount of power from piecewise linear segment `k` produced by generator `g` at time `t`. For example, if cost curve for generator `g` is defined by the points `(100, 1400)`, `(110, 1600)`, `(130, 2200)` and `(135, 2400)`, and if the generator is producing 115 MW of power at time `t`, then `segprod[g,t,:]` equals `[10.0, 5.0, 0.0]`.| MW
`reserve[r,g,t]` | $r_g(t)$ | Amount of reserve `r` provided by unit `g` at time `t`. | MW
`startup[g,t,s]` | $\delta^s_g(t)$ | True if generator `g` switches on at time `t` incurring start-up costs from start-up category `s`. | Binary
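The `segprod` example above can be reproduced with a short standalone calculation (plain Julia, not part of the package):
```julia
# Split a production level across the piecewise-linear segments of the cost
# curve: 115 MW on a curve with breakpoints 100/110/130/135 MW gives segment
# productions [10.0, 5.0, 0.0].
curve_mw = [100.0, 110.0, 130.0, 135.0]  # "Production cost curve (MW)" breakpoints
prod = 115.0                             # total production (MW)
widths = diff(curve_mw)                  # segment widths: [10.0, 20.0, 5.0]
above = prod - curve_mw[1]               # production above the minimum: 15.0
cuts = cumsum(widths)                    # cumulative segment capacity: [10.0, 30.0, 35.0]
segprod = [clamp(above - c + w, 0.0, w) for (c, w) in zip(cuts, widths)]
@assert segprod == [10.0, 5.0, 0.0]
```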
!!! warning
The first-stage decision variables of the JuMP model are `is_on[g,t]`, `switch_on[g,t]`, `switch_off[g,t]`, and `startup[g,t,s]`. As such, the dictionaries corresponding to these variables do not include the scenario index in their keys. In contrast, all other variables of the created JuMP model are allowed to obtain a different value in each scenario and are thus modeled as second-stage decision variables. Accordingly, the dictionaries of all second-stage decision variables have the scenario index in their keys. This is true even if the model is created to solve the deterministic SCUC, in which case the default scenario index `s1` is included in the dictionary key.
#### Profiled Units
Name | Description | Unit | Stage
:-----|:-------------|:------: | :------:
`prod_profiled[s,t]` | Amount of power produced by profiled unit `g` at time `t`. | MW | 2
Name | Symbol | Description | Unit
:-----|:------:|:-------------|:------:
`prod_profiled[s,t]` | $p^{\dagger}_{g}(t)$ | Amount of power produced by profiled unit `g` at time `t`. | MW
### Buses
Name | Description | Unit | Stage
:-----|:-------------|:------:| :------:
`net_injection[sn,b,t]` | Net injection at bus `b` at time `t` in scenario `sn`. | MW | 2
`curtail[sn,b,t]` | Amount of load curtailed at bus `b` at time `t` in scenario `sn`. | MW | 2
Name | Symbol | Description | Unit
:-----|:------:|:-------------|:------:
`net_injection[b,t]` | $n_b(t)$ | Net injection at bus `b` at time `t`. | MW
`curtail[b,t]` | $s^+_b(t)$ | Amount of load curtailed at bus `b` at time `t` | MW
### Price-sensitive loads
Name | Description | Unit | Stage
:-----|:-------------|:------:| :------:
`loads[sn,s,t]` | Amount of power served to price-sensitive load `s` at time `t` in scenario `sn`. | MW | 2
Name | Symbol | Description | Unit
:-----|:------:|:-------------|:------:
`loads[s,t]` | $d_{s}(t)$ | Amount of power served to price-sensitive load `s` at time `t`. | MW
### Transmission lines
Name | Description | Unit | Stage
:-----|:-------------|:------:| :------:
`flow[sn,l,t]` | Power flow on line `l` at time `t` in scenario `sn`. | MW | 2
`overflow[sn,l,t]` | Amount of flow above the limit for line `l` at time `t` in scenario `sn`. | MW | 2
Name | Symbol | Description | Unit
:-----|:------:|:-------------|:------:
`flow[l,t]` | $f_l(t)$ | Power flow on line `l` at time `t`. | MW
`overflow[l,t]` | $f^+_l(t)$ | Amount of flow above the limit for line `l` at time `t`. | MW
!!! warning
Since transmission and N-1 security constraints are enforced in a lazy way, most of the `flow[l,t]` variables are never added to the model. Accessing `model[:flow][sn,l,t]` without first checking that the variable exists will likely generate an error.
Since transmission and N-1 security constraints are enforced in a lazy way, most of the `flow[l,t]` variables are never added to the model. Accessing `model[:flow][l,t]` without first checking that the variable exists will likely generate an error.
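A defensive access pattern is sketched below (hypothetical line name `"l1"`; it assumes `model` was built by `UnitCommitment.build_model` and optimized, and simply falls back to `nothing` when the variable was never created):
```julia
using JuMP
flow_l1_t1 = try
    value(model[:flow]["l1", 1])
catch
    nothing  # variable never added: no transmission constraint was generated for it
end
```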
Objective function
------------------
@@ -118,12 +106,7 @@ end
### Fixing variables, modifying objective function and adding constraints
Since we now have a direct reference to the JuMP decision variables, it is possible to fix variables, change the coefficients in the objective function, or even add new constraints to the model before solving it.
!!! warning
It is important to take the stage of each decision variable into account when modifying the optimization model. In a deterministic SCUC model, modifying a second-stage decision variable requires adding the scenario index `"s1"`, which is the default scenario name assigned to the second-stage decision variables. For a stochastic SCUC model, the package permits modifying the second-stage decision variables individually for each scenario.
The script below shows how the JuMP model can be modified after it is created. For more information on modifying an existing model, [see the JuMP documentation](https://jump.dev/JuMP.jl/stable/manual/variables/).
Since we now have a direct reference to the JuMP decision variables, it is possible to fix variables, change the coefficients in the objective function, or even add new constraints to the model before solving it. The script below shows how this can be accomplished. For more information on modifying an existing model, [see the JuMP documentation](https://jump.dev/JuMP.jl/stable/manual/variables/).
```julia
using Cbc
@@ -139,29 +122,13 @@ model = UnitCommitment.build_model(
optimizer=Cbc.Optimizer,
)
# Fix the commitment status of the generator "g1" in time period 1 to 1.0
# Fix a decision variable to 1.0
JuMP.fix(
model[:is_on]["g1",1],
1.0,
force=true,
)
# Fix the production level of the generator "g1" above its minimum level in time period 1 and
# in scenario "s1" to 20.0 MW. Observe that the three-tuple dictionary key involves the scenario
# index "s1", as production above minimum is a second-stage decision variable.
JuMP.fix(
model[:prod_above]["s1", "g1", 1],
20.0,
force=true,
)
# Enforce the curtailment of 20.0 MW of load at bus "b2" in time period 4 in scenario "s1".
JuMP.fix(
curtail["s1", "b2", 4] =
20.0,
force=true,
)
# Change the objective function
JuMP.set_objective_coefficient(
model,
@@ -211,10 +178,10 @@ for t in 1:T
# In this example, we assume a cost of $5/MW.
set_objective_coefficient(model, x[t], 5.0)
# Attach the new component to bus b1 in scenario s1, by modifying the
# Attach the new component to bus b1, by modifying the
# constraint `eq_net_injection`.
set_normalized_coefficient(
model[:eq_net_injection]["s1", "b1", t],
model[:eq_net_injection]["b1", t],
x[t],
1.0,
)

View File

@@ -4,33 +4,41 @@ Usage
Installation
------------
UnitCommitment.jl was tested and developed with [Julia 1.9](https://julialang.org/). To install Julia, please follow the [installation guide on the official Julia website](https://julialang.org/downloads/). To install UnitCommitment.jl, run the Julia interpreter, type `]` to open the package manager, then type:
UnitCommitment.jl was tested and developed with [Julia 1.7](https://julialang.org/). To install Julia, please follow the [installation guide on the official Julia website](https://julialang.org/downloads/). To install UnitCommitment.jl, run the Julia interpreter, type `]` to open the package manager, then type:
```text
pkg> add UnitCommitment@0.4
pkg> add UnitCommitment@0.3
```
To solve the optimization models, a mixed-integer linear programming (MILP) solver is also required. Please see the [JuMP installation guide](https://jump.dev/JuMP.jl/stable/installation/) for more instructions on installing a solver. Typical open-source choices are [HiGHS](https://github.com/jump-dev/HiGHS.jl), [Cbc](https://github.com/JuliaOpt/Cbc.jl) and [GLPK](https://github.com/JuliaOpt/GLPK.jl). In the instructions below, HiGHS will be used, but any other MILP solver listed in JuMP installation guide should also be compatible.
To test that the package has been correctly installed, run:
```text
pkg> test UnitCommitment
```
If all tests pass, the package should now be ready to be used by any Julia script on the machine.
To solve the optimization models, a mixed-integer linear programming (MILP) solver is also required. Please see the [JuMP installation guide](https://jump.dev/JuMP.jl/stable/installation/) for more instructions on installing a solver. Typical open-source choices are [Cbc](https://github.com/JuliaOpt/Cbc.jl) and [GLPK](https://github.com/JuliaOpt/GLPK.jl). In the instructions below, Cbc will be used, but any other MILP solver listed in JuMP installation guide should also be compatible.
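For example, Cbc can be added through the same package manager prompt:
```text
pkg> add Cbc
```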
Typical Usage
-------------
### Solving user-provided instances
The first step to use UC.jl is to construct JSON files that describe each scenario of your deterministic or stochastic unit commitment instance. See [Data Format](format.md) for a complete description of the data format UC.jl expects. The next steps, as shown below, are to: (1) read the scenario files; (2) build the optimization model; (3) run the optimization; and (4) extract the optimal solution.
The first step to use UC.jl is to construct a JSON file describing your unit commitment instance. See [Data Format](format.md) for a complete description of the data format UC.jl expects. The next steps, as shown below, are to: (1) read the instance from file; (2) construct the optimization model; (3) run the optimization; and (4) extract the optimal solution.
```julia
using HiGHS
using JuMP
using Cbc
using JSON
using UnitCommitment
# 1. Read instance
instance = UnitCommitment.read(["example/s1.json", "example/s2.json"])
instance = UnitCommitment.read("/path/to/input.json")
# 2. Construct optimization model
model = UnitCommitment.build_model(
instance=instance,
optimizer=HiGHS.Optimizer,
optimizer=Cbc.Optimizer,
)
# 3. Solve model
@@ -38,39 +46,16 @@ UnitCommitment.optimize!(model)
# 4. Write solution to a file
solution = UnitCommitment.solution(model)
UnitCommitment.write("example/out.json", solution)
```
To read multiple files from a given folder, the [Glob](https://github.com/vtjnash/Glob.jl) package can be used:
```jldoctest usage1; output = false
using Glob
using UnitCommitment
instance = UnitCommitment.read(glob("s*.json", "example/"))
# output
UnitCommitmentInstance(2 scenarios, 6 thermal units, 0 profiled units, 14 buses, 20 lines, 19 contingencies, 1 price sensitive loads, 4 time steps)
```
To solve deterministic instances, a single scenario file may be provided.
```jldoctest usage1; output = false
instance = UnitCommitment.read("example/s1.json")
# output
UnitCommitmentInstance(1 scenarios, 6 thermal units, 0 profiled units, 14 buses, 20 lines, 19 contingencies, 1 price sensitive loads, 4 time steps)
UnitCommitment.write("/path/to/output.json", solution)
```
### Solving benchmark instances
UnitCommitment.jl contains a large number of deterministic benchmark instances collected from the literature and converted into a common data format. To solve one of these instances individually, instead of constructing your own, the function `read_benchmark` can be used, as shown below. See [Instances](instances.md) for the complete list of available instances.
UnitCommitment.jl contains a large number of benchmark instances collected from the literature and converted into a common data format. To solve one of these instances individually, instead of constructing your own, the function `read_benchmark` can be used, as shown below. See [Instances](instances.md) for the complete list of available instances.
```jldoctest usage1; output = false
```julia
using UnitCommitment
instance = UnitCommitment.read_benchmark("matpower/case3375wp/2017-02-01")
# output
UnitCommitmentInstance(1 scenarios, 590 thermal units, 0 profiled units, 3374 buses, 4161 lines, 3245 contingencies, 0 price sensitive loads, 36 time steps)
```
## Customizing the formulation
@@ -78,7 +63,7 @@ UnitCommitmentInstance(1 scenarios, 590 thermal units, 0 profiled units, 3374 bu
By default, `build_model` uses a formulation that combines modeling components from different publications, and that has been carefully tested, using our own benchmark scripts, to provide good performance across a wide variety of instances. This default formulation is expected to change over time, as new methods are proposed in the literature. You can, however, construct your own formulation, based on the modeling components that you choose, as shown in the next example.
```julia
using HiGHS
using Cbc
using UnitCommitment
import UnitCommitment:
@@ -93,7 +78,7 @@ instance = UnitCommitment.read_benchmark(
model = UnitCommitment.build_model(
instance = instance,
optimizer = HiGHS.Optimizer,
optimizer = Cbc.Optimizer,
formulation = Formulation(
pwl_costs = KnuOstWat2018.PwlCosts(),
ramping = MorLatRam2013.Ramping(),
@@ -108,24 +93,24 @@ model = UnitCommitment.build_model(
## Generating initial conditions
When creating random unit commitment instances for benchmark purposes, it is often hard to compute, in advance, sensible initial conditions for all thermal generators. Setting initial conditions naively (for example, making all generators initially off and producing no power) can easily cause the instance to become infeasible due to excessive ramping. Initial conditions can also make it hard to modify existing instances. For example, increasing the system load without carefully modifying the initial conditions may make the problem infeasible or unrealistically challenging to solve.
When creating random unit commitment instances for benchmark purposes, it is often hard to compute, in advance, sensible initial conditions for all generators. Setting initial conditions naively (for example, making all generators initially off and producing no power) can easily cause the instance to become infeasible due to excessive ramping. Initial conditions can also make it hard to modify existing instances. For example, increasing the system load without carefully modifying the initial conditions may make the problem infeasible or unrealistically challenging to solve.
To help with this issue, UC.jl provides a utility function which can generate feasible initial conditions by solving a single-period optimization problem, as shown below:
```julia
using HiGHS
using Cbc
using UnitCommitment
# Read original instance
instance = UnitCommitment.read("example/s1.json")
instance = UnitCommitment.read("instance.json")
# Generate initial conditions (in-place)
UnitCommitment.generate_initial_conditions!(instance, HiGHS.Optimizer)
UnitCommitment.generate_initial_conditions!(instance, Cbc.Optimizer)
# Construct and solve optimization model
model = UnitCommitment.build_model(
instance=instance,
optimizer=HiGHS.Optimizer,
optimizer=Cbc.Optimizer,
)
UnitCommitment.optimize!(model)
```
@@ -136,76 +121,22 @@ UnitCommitment.optimize!(model)
## Verifying solutions
When developing new formulations, it is very easy to introduce subtle errors in the model that result in incorrect solutions. To help avoid this, UC.jl includes a utility function that verifies if a given solution is feasible, and, if not, prints all the validation errors it found. The implementation of this function is completely independent of the implementation of the optimization model, and therefore can be used to validate it.
When developing new formulations, it is very easy to introduce subtle errors in the model that result in incorrect solutions. To help with this, UC.jl includes a utility function that verifies if a given solution is feasible, and, if not, prints all the validation errors it found. The implementation of this function is completely independent from the implementation of the optimization model, and therefore can be used to validate it. The function can also be used to verify solutions produced by other optimization packages, as long as they follow the [UC.jl data format](format.md).
```jldoctest; output = false
```julia
using JSON
using UnitCommitment
# Read instance
instance = UnitCommitment.read("example/s1.json")
instance = UnitCommitment.read("instance.json")
# Read solution (potentially produced by other packages)
solution = JSON.parsefile("example/out.json")
solution = JSON.parsefile("solution.json")
# Validate solution and print validation errors
UnitCommitment.validate(instance, solution)
# output
true
```
## Progressive Hedging
By default, UC.jl uses the Extensive Form (EF) when solving stochastic instances. This approach involves constructing a single JuMP model that contains data and decision variables for all scenarios. Although EF has optimality guarantees and performs well on small test cases, it can become computationally intractable for large instances or a substantial number of scenarios.
Progressive Hedging (PH) is an alternative (heuristic) solution method provided by UC.jl in which the problem is decomposed into smaller scenario-based subproblems, which are then solved in parallel in separate Julia processes, potentially across multiple machines. Quadratic penalty terms are used to enforce convergence of first-stage decision variables. The method is closely related to the Alternating Direction Method of Multipliers (ADMM) and can handle larger instances, although it is not guaranteed to converge to the optimal solution. Our implementation of PH relies on the Message Passing Interface (MPI) for communication. We refer to the [MPI.jl documentation](https://github.com/JuliaParallel/MPI.jl) for more details on installing MPI.
The following example shows how to solve SCUC instances using progressive hedging. The script should be saved in a file, say `ph.jl`, and executed using `mpiexec -n <num-scenarios> julia ph.jl`.
```julia
using HiGHS
using MPI
using UnitCommitment
using Glob
# 1. Initialize MPI
MPI.Init()
# 2. Configure progressive hedging method
ph = UnitCommitment.ProgressiveHedging()
# 3. Read problem instance
instance = UnitCommitment.read(["example/s1.json", "example/s2.json"], ph)
# 4. Build JuMP model
model = UnitCommitment.build_model(
instance = instance,
optimizer = HiGHS.Optimizer,
)
# 5. Run the decentralized optimization algorithm
UnitCommitment.optimize!(model, ph)
# 6. Fetch the solution
solution = UnitCommitment.solution(model, ph)
# 7. Close MPI
MPI.Finalize()
```
When using PH, the model can be customized as usual, with different formulations or additional user-provided constraints. Note that `read`, in this case, takes `ph` as an argument. This allows each Julia process to read only the instance files that are relevant to it. Similarly, the `solution` function gathers the optimal solution of each process and returns a combined dictionary.
Each process solves a sub-problem with $\frac{s}{p}$ scenarios, where $s$ is the total number of scenarios and $p$ is the number of MPI processes. For instance, if we have 15 scenario files and 5 processes, then each process will solve a JuMP model that contains data for 3 scenarios. If the total number of scenarios is not divisible by the number of processes, then an error will be thrown.
!!! warning
Currently, PH can handle only equiprobable scenarios. Further, `solution(model, ph)` can only handle cases where only one scenario is modeled in each process.
## Computing Locational Marginal Prices
Locational marginal prices (LMPs) refer to the cost of supplying electricity at a particular location of the network. Multiple methods for computing LMPs have been proposed in the literature. UnitCommitment.jl implements two commonly-used methods: conventional LMPs and Approximated Extended LMPs (AELMPs). To compute LMPs for a given unit commitment instance, the `compute_lmp` function can be used, as shown in the examples below. The function accepts three arguments -- a solved SCUC model, an LMP method, and a linear optimizer -- and it returns a dictionary mapping `(bus_name, time)` to the marginal price.
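For instance, a minimal sketch for conventional LMPs (assuming the deterministic example instance `example/s1.json` used elsewhere in this guide, and HiGHS as both the MILP and the LP solver) might look like this:
```julia
using HiGHS
using UnitCommitment
import UnitCommitment: ConventionalLMP

# Read the instance, then build and solve the SCUC model
instance = UnitCommitment.read("example/s1.json")
model = UnitCommitment.build_model(
    instance = instance,
    optimizer = HiGHS.Optimizer,
)
UnitCommitment.optimize!(model)

# Compute conventional LMPs with a linear programming optimizer
lmp = UnitCommitment.compute_lmp(
    model,
    ConventionalLMP(),
    optimizer = HiGHS.Optimizer,
)

# Query the price at scenario "s1", bus "b1", time period 1
@show lmp["s1", "b1", 1]
```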
@@ -292,90 +223,4 @@ aelmp = UnitCommitment.compute_lmp(
# Example: "s1" is the scenario name, "b1" is the bus name, 1 is the first time slot
# Note: although scenario is supported, the query still keeps the scenario keys for consistency.
@show aelmp["s1", "b1", 1]
```
## Time Decomposition
Solving unit commitment instances that have long time horizons (for example, year-long 8760-hour instances) requires a substantial amount of computational power. To address this issue, UC.jl offers a time decomposition method, which breaks the instance down into multiple overlapping subproblems, solves them sequentially, then reassembles the solution.
When solving a unit commitment instance with a dense time slot structure, computational complexity can become a significant challenge. For instance, if the instance contains hourly data for an entire year (8760 hours), solving such a model can require a substantial amount of computational power. To address this issue, UC.jl provides a `TimeDecomposition` method that can be passed to the `optimize!` function. This method decomposes the problem into multiple sub-problems and solves them sequentially.
The `optimize!` function takes five parameters: a unit commitment instance, a `TimeDecomposition` method, an optimizer, and two optional callback functions, `after_build` and `after_optimize`. It returns a solution dictionary. The `TimeDecomposition` method itself accepts four arguments: `time_window`, `time_increment`, `inner_method` (optional), and `formulation` (optional), which define the time window of each sub-problem, the number of time steps to advance between sub-problems, the method used to solve each sub-problem, and the formulation employed, respectively. The `after_build` and `after_optimize` callbacks are invoked after each sub-model is built and optimized, respectively. The `after_build` function must accept two arguments, `model` and `instance`, in that order; the `after_optimize` function must accept three arguments, `solution`, `model`, and `instance`, in that order.
The code snippet below illustrates an example of solving an instance by decomposing the model into multiple 36-hour sub-problems using the `XavQiuWanThi2019` method. Each sub-problem advances 24 hours at a time. The first sub-problem covers time steps 1 to 36, the second covers time steps 25 to 60, the third covers time steps 49 to 84, and so on. The initial power levels and statuses of the second and subsequent sub-problems are set based on the results of the first 24 hours from each of their immediate prior sub-problems. In essence, this approach addresses the complexity of solving a large problem by tackling it in 24-hour intervals, while incorporating an additional 12-hour buffer to mitigate the closing window effect for each sub-problem. Furthermore, the `after_build` function imposes the restriction that `g3` and `g4` cannot be activated simultaneously during the initial time slot of each sub-problem. On the other hand, the `after_optimize` function is invoked to calculate the conventional Locational Marginal Prices (LMPs) for each sub-problem, and subsequently appends the computed values to the `lmps` vector.
> **Warning**
> Specifying `TimeDecomposition` as the value of the `inner_method` field of another `TimeDecomposition` causes errors when calling the `optimize!` function due to the different argument structures between the two `optimize!` functions.
```julia
using UnitCommitment, JuMP, Cbc, HiGHS
import UnitCommitment:
TimeDecomposition,
ConventionalLMP,
XavQiuWanThi2019,
Formulation
# specifying the after_build and after_optimize functions
function after_build(model, instance)
@constraint(
model,
model[:is_on]["g3", 1] + model[:is_on]["g4", 1] <= 1,
)
end
lmps = []
function after_optimize(solution, model, instance)
lmp = UnitCommitment.compute_lmp(
model,
ConventionalLMP(),
optimizer = HiGHS.Optimizer,
)
return push!(lmps, lmp)
end
# assume the instance is given as a 120h problem
instance = UnitCommitment.read("instance.json")
solution = UnitCommitment.optimize!(
instance,
TimeDecomposition(
time_window = 36, # solve 36h problems
time_increment = 24, # advance by 24h each time
inner_method = XavQiuWanThi2019.Method(),
formulation = Formulation(),
),
optimizer = Cbc.Optimizer,
after_build = after_build,
after_optimize = after_optimize,
)
```
## Day-ahead (DA) Market to Real-time (RT) Markets
The UC.jl package offers a set of functions for solving electricity market problems. The primary function, `solve_market`, solves the day-ahead (DA) market, which can be either deterministic or stochastic, and then sequentially maps the resulting commitment statuses to the real-time (RT) markets, which are deterministic instances. The time span of the DA market must cover all RT markets, and the RT market file paths must be specified in chronological order. Each RT market should represent a single time slot; it is recommended to include a few additional time slots to mitigate the closing-window effect.
The `solve_market` function accepts several parameters: the file path (or a list of file paths, in the case of a stochastic market) for the DA market, a list of file paths for the RT markets, the market settings specified by the `MarketSettings` structure, and an optimizer. The `MarketSettings` structure itself takes three optional arguments: `inner_method`, `lmp_method`, and `formulation`. If the computation of Locational Marginal Prices (LMPs) is not desired, `lmp_method` can be set to `nothing`. Additional optional parameters include a linear programming optimizer for solving LMPs (if an optimizer different from the main one is desired), callback functions `after_build_da` and `after_optimize_da`, which are invoked after the construction and optimization of the DA market, and callback functions `after_build_rt` and `after_optimize_rt`, which are invoked after the construction and optimization of each RT market. Each `after_build` callback must accept two arguments, `model` and `instance`, in that order; each `after_optimize` callback must accept three arguments, `solution`, `model`, and `instance`, in that order.
As an illustrative example, suppose the DA market predicts hourly data for a 24-hour period, while the RT markets represent 5-minute intervals. In this scenario, each RT market file corresponds to a specific 5-minute interval, with the first RT market representing the initial 5 minutes, the second RT market representing the subsequent 5 minutes, and so on. Consequently, there should be 12 RT market files for each hour. To mitigate the closing window effect, except for the last few RT markets, each RT market should contain three time slots, resulting in a total time span of 15 minutes. However, only the first time slot is considered in the final solution. The last two RT markets should only contain 2 and 1 time slot(s), respectively, to ensure that the total time covered by all RT markets does not exceed the time span of the DA market. The code snippet below demonstrates a simplified example of how to utilize the `solve_market` function. Please note that it only serves as a simplified example and may require further customization based on the specific requirements of your use case.
```julia
using UnitCommitment, Cbc, HiGHS
import UnitCommitment:
MarketSettings,
XavQiuWanThi2019,
ConventionalLMP,
Formulation
solution = UnitCommitment.solve_market(
"da_instance.json",
["rt_instance_1.json", "rt_instance_2.json", "rt_instance_3.json"],
MarketSettings(
inner_method = XavQiuWanThi2019.Method(),
lmp_method = ConventionalLMP(),
formulation = Formulation(),
),
optimizer = Cbc.Optimizer,
lp_optimizer = HiGHS.Optimizer,
)
```
```

View File

@@ -4,15 +4,12 @@
module UnitCommitment
using Requires
using Base: String
include("instance/structs.jl")
include("model/formulations/base/structs.jl")
include("solution/structs.jl")
include("lmp/structs.jl")
include("market/structs.jl")
include("model/formulations/ArrCon2000/structs.jl")
include("model/formulations/CarArr2006/structs.jl")
@@ -22,9 +19,7 @@ include("model/formulations/KnuOstWat2018/structs.jl")
include("model/formulations/MorLatRam2013/structs.jl")
include("model/formulations/PanGua2016/structs.jl")
include("solution/methods/XavQiuWanThi2019/structs.jl")
include("solution/methods/ProgressiveHedging/structs.jl")
include("model/formulations/WanHob2016/structs.jl")
include("solution/methods/TimeDecomposition/structs.jl")
include("import/egret.jl")
include("instance/read.jl")
@@ -38,7 +33,6 @@ include("model/formulations/base/sensitivity.jl")
include("model/formulations/base/system.jl")
include("model/formulations/base/unit.jl")
include("model/formulations/base/punit.jl")
include("model/formulations/base/storage.jl")
include("model/formulations/CarArr2006/pwlcosts.jl")
include("model/formulations/DamKucRajAta2016/ramp.jl")
include("model/formulations/Gar1962/pwlcosts.jl")
@@ -55,10 +49,6 @@ include("solution/methods/XavQiuWanThi2019/enforce.jl")
include("solution/methods/XavQiuWanThi2019/filter.jl")
include("solution/methods/XavQiuWanThi2019/find.jl")
include("solution/methods/XavQiuWanThi2019/optimize.jl")
include("solution/methods/TimeDecomposition/optimize.jl")
include("solution/methods/ProgressiveHedging/optimize.jl")
include("solution/methods/ProgressiveHedging/read.jl")
include("solution/methods/ProgressiveHedging/solution.jl")
include("solution/optimize.jl")
include("solution/solution.jl")
include("solution/warmstart.jl")
@@ -72,13 +62,5 @@ include("validation/repair.jl")
include("validation/validate.jl")
include("lmp/conventional.jl")
include("lmp/aelmp.jl")
include("market/market.jl")
function __init__()
@require MIPLearn = "2b1277c3-b477-4c49-a15e-7ba350325c68" begin
include("solution/methods/MIPLearn/structs.jl")
include("solution/methods/MIPLearn/miplearn.jl")
end
end
end

View File

@@ -136,35 +136,22 @@ function _from_json(json; repair = true)::UnitCommitmentScenario
loads = PriceSensitiveLoad[]
reserves = Reserve[]
profiled_units = ProfiledUnit[]
storage_units = StorageUnit[]
function scalar(x; default = nothing)
x !== nothing || return default
return x
end
time_horizon = json["Parameters"]["Time horizon (min)"]
time_horizon = json["Parameters"]["Time (h)"]
if time_horizon === nothing
time_horizon = json["Parameters"]["Time (h)"]
if time_horizon === nothing
time_horizon = json["Parameters"]["Time horizon (h)"]
end
if time_horizon !== nothing
time_horizon *= 60
end
time_horizon = json["Parameters"]["Time horizon (h)"]
end
time_horizon !== nothing || error("Missing parameter: Time horizon (min)")
isinteger(time_horizon) ||
error("Time horizon must be an integer in minutes")
time_horizon = Int(time_horizon)
time_horizon !== nothing || error("Missing parameter: Time horizon (h)")
time_step = scalar(json["Parameters"]["Time step (min)"], default = 60)
(60 % time_step == 0) ||
error("Time step $time_step is not a divisor of 60")
(time_horizon % time_step == 0) || error(
"Time step $time_step is not a divisor of time horizon $time_horizon",
)
time_multiplier = 60 ÷ time_step
T = time_horizon ÷ time_step
T = time_horizon * time_multiplier
probability = json["Parameters"]["Scenario weight"]
probability !== nothing || (probability = 1)
@@ -197,7 +184,6 @@ function _from_json(json; repair = true)::UnitCommitmentScenario
ThermalUnit[],
PriceSensitiveLoad[],
ProfiledUnit[],
StorageUnit[],
)
name_to_bus[bus_name] = bus
push!(buses, bus)
@@ -354,6 +340,7 @@ function _from_json(json; repair = true)::UnitCommitmentScenario
length(lines) + 1,
name_to_bus[dict["Source bus"]],
name_to_bus[dict["Target bus"]],
scalar(dict["Reactance (ohms)"]),
scalar(dict["Susceptance (S)"]),
timeseries(
dict["Normal flow limit (MW)"],
@@ -406,52 +393,6 @@ function _from_json(json; repair = true)::UnitCommitmentScenario
end
end
# Read storage units
if "Storage units" in keys(json)
for (storage_name, dict) in json["Storage units"]
bus = name_to_bus[dict["Bus"]]
min_level =
timeseries(scalar(dict["Minimum level (MWh)"], default = 0.0))
max_level = timeseries(dict["Maximum level (MWh)"])
storage = StorageUnit(
storage_name,
bus,
min_level,
max_level,
timeseries(
scalar(
dict["Allow simultaneous charging and discharging"],
default = true,
),
),
timeseries(dict["Charge cost (\$/MW)"]),
timeseries(dict["Discharge cost (\$/MW)"]),
timeseries(scalar(dict["Charge efficiency"], default = 1.0)),
timeseries(scalar(dict["Discharge efficiency"], default = 1.0)),
timeseries(scalar(dict["Loss factor"], default = 0.0)),
timeseries(
scalar(dict["Minimum charge rate (MW)"], default = 0.0),
),
timeseries(dict["Maximum charge rate (MW)"]),
timeseries(
scalar(dict["Minimum discharge rate (MW)"], default = 0.0),
),
timeseries(dict["Maximum discharge rate (MW)"]),
scalar(dict["Initial level (MWh)"], default = 0.0),
scalar(
dict["Last period minimum level (MWh)"],
default = min_level[T],
),
scalar(
dict["Last period maximum level (MWh)"],
default = max_level[T],
),
)
push!(bus.storage_units, storage)
push!(storage_units, storage)
end
end
scenario = UnitCommitmentScenario(
name = scenario_name,
probability = probability,
@@ -467,13 +408,10 @@ function _from_json(json; repair = true)::UnitCommitmentScenario
reserves = reserves,
reserves_by_name = name_to_reserve,
time = T,
time_step = time_step,
thermal_units_by_name = Dict(g.name => g for g in thermal_units),
thermal_units = thermal_units,
profiled_units_by_name = Dict(pu.name => pu for pu in profiled_units),
profiled_units = profiled_units,
storage_units_by_name = Dict(su.name => su for su in storage_units),
storage_units = storage_units,
isf = spzeros(Float64, length(lines), length(buses) - 1),
lodf = spzeros(Float64, length(lines), length(lines)),
)

View File

@@ -9,7 +9,6 @@ mutable struct Bus
thermal_units::Vector
price_sensitive_loads::Vector
profiled_units::Vector
storage_units::Vector
end
mutable struct CostSegment
@@ -56,6 +55,7 @@ mutable struct TransmissionLine
offset::Int
source::Bus
target::Bus
reactance::Float64
susceptance::Float64
normal_flow_limit::Vector{Float64}
emergency_flow_limit::Vector{Float64}
@@ -83,26 +83,6 @@ mutable struct ProfiledUnit
cost::Vector{Float64}
end
mutable struct StorageUnit
name::String
bus::Bus
min_level::Vector{Float64}
max_level::Vector{Float64}
simultaneous_charge_and_discharge::Vector{Bool}
charge_cost::Vector{Float64}
discharge_cost::Vector{Float64}
charge_efficiency::Vector{Float64}
discharge_efficiency::Vector{Float64}
loss_factor::Vector{Float64}
min_charge_rate::Vector{Float64}
max_charge_rate::Vector{Float64}
min_discharge_rate::Vector{Float64}
max_discharge_rate::Vector{Float64}
initial_level::Float64
min_ending_level::Float64
max_ending_level::Float64
end
Base.@kwdef mutable struct UnitCommitmentScenario
buses_by_name::Dict{AbstractString,Bus}
buses::Vector{Bus}
@@ -123,10 +103,7 @@ Base.@kwdef mutable struct UnitCommitmentScenario
reserves::Vector{Reserve}
thermal_units_by_name::Dict{AbstractString,ThermalUnit}
thermal_units::Vector{ThermalUnit}
storage_units_by_name::Dict{AbstractString,StorageUnit}
storage_units::Vector{StorageUnit}
time::Int
time_step::Int
end
Base.@kwdef mutable struct UnitCommitmentInstance

View File

@@ -1,220 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""
solve_market(
da_path::Union{String, Vector{String}},
rt_paths::Vector{String},
settings::MarketSettings;
optimizer,
lp_optimizer = nothing,
after_build_da = nothing,
after_optimize_da = nothing,
after_build_rt = nothing,
after_optimize_rt = nothing,
)::OrderedDict
Solve the day-ahead and the real-time markets by means of commitment status mapping.
The method first obtains commitment statuses by solving the day-ahead market,
and then solves each real-time market using the corresponding results.
Arguments
---------
- `da_path`:
the data file path of the day-ahead market, can be stochastic.
- `rt_paths`:
the list of data file paths of the real-time markets, must be deterministic for each market.
- `settings`:
the MarketSettings which include the problem formulation, the solving method, and LMP method.
- `optimizer`:
the optimizer for solving the problem.
- `lp_optimizer`:
the linear programming optimizer for solving the LMP problem, defaults to `nothing`.
If not specified by the user, the program uses `optimizer` instead.
- `after_build_da`:
a user-defined function that allows modifying the DA model after building,
must have 2 arguments `model` and `instance` in order.
- `after_optimize_da`:
a user-defined function that allows handling additional steps after optimizing the DA model,
must have 3 arguments `solution`, `model` and `instance` in order.
- `after_build_rt`:
a user-defined function that allows modifying each RT model after building,
must have 2 arguments `model` and `instance` in order.
- `after_optimize_rt`:
a user-defined function that allows handling additional steps after optimizing each RT model,
must have 3 arguments `solution`, `model` and `instance` in order.
Examples
--------
```julia
using UnitCommitment, Cbc, HiGHS
import UnitCommitment:
MarketSettings,
XavQiuWanThi2019,
ConventionalLMP,
Formulation
solution = UnitCommitment.solve_market(
"da_instance.json",
["rt_instance_1.json", "rt_instance_2.json", "rt_instance_3.json"],
MarketSettings(
inner_method = XavQiuWanThi2019.Method(),
lmp_method = ConventionalLMP(),
formulation = Formulation(),
),
optimizer = Cbc.Optimizer,
lp_optimizer = HiGHS.Optimizer,
)
"""
function solve_market(
da_path::Union{String,Vector{String}},
rt_paths::Vector{String},
settings::MarketSettings;
optimizer,
lp_optimizer = nothing,
after_build_da = nothing,
after_optimize_da = nothing,
after_build_rt = nothing,
after_optimize_rt = nothing,
)::OrderedDict
# solve da instance as usual
@info "Solving the day-ahead market with file $da_path..."
instance_da = UnitCommitment.read(da_path)
# LP optimizer is optional: if not specified, use optimizer
lp_optimizer = lp_optimizer === nothing ? optimizer : lp_optimizer
# build and optimize the DA market
model_da, solution_da = _build_and_optimize(
instance_da,
settings,
optimizer = optimizer,
lp_optimizer = lp_optimizer,
after_build = after_build_da,
after_optimize = after_optimize_da,
)
# prepare the final solution
solution = OrderedDict()
solution["Day-ahead market"] = solution_da
solution["Real-time markets"] = OrderedDict()
# count the time, sc.time = n-slots, sc.time_step = slot-interval
# sufficient to look at only one scenario
sc = instance_da.scenarios[1]
# max time (min) of the DA market
max_time = sc.time * sc.time_step
# current time increments through the RT market list
current_time = 0
# DA market time slots in (min)
da_time_intervals = [sc.time_step * ts for ts in 1:sc.time]
# get the uc status and set each uc fixed
solution_rt = OrderedDict()
prev_initial_status = OrderedDict()
for rt_path in rt_paths
@info "Solving the real-time market with file $rt_path..."
instance_rt = UnitCommitment.read(rt_path)
# check instance time
sc = instance_rt.scenarios[1]
# check each time slot in the RT model
for ts in 1:sc.time
slot_t_end = current_time + ts * sc.time_step
# ensure this RT's slot time ub never exceeds max time of DA
slot_t_end <= max_time || error(
"The time of the real-time market cannot exceed the time of the day-ahead market.",
)
# get the slot start time to determine commitment status
slot_t_start = slot_t_end - sc.time_step
# find the index of the first DA time slot that covers slot_t_start
da_time_slot = findfirst(ti -> slot_t_start < ti, da_time_intervals)
# update thermal unit commitment status
for g in sc.thermal_units
g.commitment_status[ts] =
value(model_da[:is_on][g.name, da_time_slot]) == 1.0
end
end
# update current time by ONE slot only
current_time += sc.time_step
# set initial status for all generators in all scenarios
if !isempty(solution_rt) && !isempty(prev_initial_status)
for g in sc.thermal_units
g.initial_power =
solution_rt["Thermal production (MW)"][g.name][1]
g.initial_status = UnitCommitment._determine_initial_status(
prev_initial_status[g.name],
[solution_rt["Is on"][g.name][1]],
)
end
end
# build and optimize the RT market
_, solution_rt = _build_and_optimize(
instance_rt,
settings,
optimizer = optimizer,
lp_optimizer = lp_optimizer,
after_build = after_build_rt,
after_optimize = after_optimize_rt,
)
prev_initial_status =
OrderedDict(g.name => g.initial_status for g in sc.thermal_units)
# rt_name = first(split(last(split(rt_path, "/")), "."))
solution["Real-time markets"][rt_path] = solution_rt
end # end of for-loop that checks each RT market
return solution
end
function _build_and_optimize(
instance::UnitCommitmentInstance,
settings::MarketSettings;
optimizer,
lp_optimizer,
after_build = nothing,
after_optimize = nothing,
)::Tuple{JuMP.Model,OrderedDict}
# build model with after build
model = UnitCommitment.build_model(
instance = instance,
optimizer = optimizer,
formulation = settings.formulation,
)
if after_build !== nothing
after_build(model, instance)
end
# optimize model
UnitCommitment.optimize!(model, settings.inner_method)
solution = UnitCommitment.solution(model)
# compute lmp and add to solution
if settings.lmp_method !== nothing
lmp = UnitCommitment.compute_lmp(
model,
settings.lmp_method,
optimizer = lp_optimizer,
)
if length(instance.scenarios) == 1
solution["Locational marginal price"] = lmp
else
for sc in instance.scenarios
solution[sc.name]["Locational marginal price"] = OrderedDict(
key => val for (key, val) in lmp if key[1] == sc.name
)
end
end
end
# run after optimize with solution
if after_optimize !== nothing
after_optimize(solution, model, instance)
end
return model, solution
end

View File

@@ -1,33 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import ..SolutionMethod
import ..PricingMethod
import ..Formulation
"""
struct MarketSettings
inner_method::SolutionMethod = XavQiuWanThi2019.Method()
lmp_method::Union{PricingMethod, Nothing} = ConventionalLMP()
formulation::Formulation = Formulation()
end
Market setting struct, typically used to map a day-ahead market to real-time markets.
Arguments
---------
- `inner_method`:
method to solve each market problem.
- `lmp_method`:
a PricingMethod method to calculate the locational marginal prices.
If it is set to `nothing`, the LMPs will not be calculated.
- `formulation`:
problem formulation.
"""
Base.@kwdef struct MarketSettings
inner_method::SolutionMethod = XavQiuWanThi2019.Method()
lmp_method::Union{PricingMethod,Nothing} = ConventionalLMP()
formulation::Formulation = Formulation()
end

View File

@@ -99,9 +99,6 @@ function build_model(;
for pu in sc.profiled_units
_add_profiled_unit!(model, pu, sc)
end
for su in sc.storage_units
_add_storage_unit!(model, su, sc)
end
_add_system_wide_eqs!(model, sc)
end
@objective(model, Min, model[:obj])

View File

@@ -99,7 +99,7 @@ function _add_production_piecewise_linear_eqs!(
add_to_expression!(
model[:obj],
segprod[sc.name, gn, t, k],
sc.probability * g.cost_segments[k].cost[t],
g.cost_segments[k].cost[t],
)
# Also add an explicit upper bound on segprod to make the solver's

View File

@@ -10,8 +10,8 @@ using SparseArrays, Base.Threads, LinearAlgebra, JuMP
Returns a (B-1)xL matrix M, where B is the number of buses and L is the number
of transmission lines. For a given bus b and transmission line l, the entry
M[l.offset, b.offset] indicates the amount of power (in MW) that flows through
transmission line l when 1 MW of power is injected at b and withdrawn from the
slack bus (the bus that has offset zero).
transmission line l when 1 MW of power is injected at the slack bus (the bus
that has offset zero) and withdrawn from b.
"""
function _injection_shift_factors(;
buses::Array{Bus},

View File

@@ -1,125 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
function _add_storage_unit!(
model::JuMP.Model,
su::StorageUnit,
sc::UnitCommitmentScenario,
)::Nothing
# Initialize variables
storage_level = _init(model, :storage_level)
charge_rate = _init(model, :charge_rate)
discharge_rate = _init(model, :discharge_rate)
is_charging = _init(model, :is_charging)
is_discharging = _init(model, :is_discharging)
eq_min_charge_rate = _init(model, :eq_min_charge_rate)
eq_max_charge_rate = _init(model, :eq_max_charge_rate)
eq_min_discharge_rate = _init(model, :eq_min_discharge_rate)
eq_max_discharge_rate = _init(model, :eq_max_discharge_rate)
# Initialize constraints
net_injection = _init(model, :expr_net_injection)
eq_storage_transition = _init(model, :eq_storage_transition)
eq_ending_level = _init(model, :eq_ending_level)
# time in hours
time_step = sc.time_step / 60
for t in 1:model[:instance].time
# Decision variable
storage_level[sc.name, su.name, t] = @variable(
model,
lower_bound = su.min_level[t],
upper_bound = su.max_level[t]
)
charge_rate[sc.name, su.name, t] = @variable(model)
discharge_rate[sc.name, su.name, t] = @variable(model)
is_charging[sc.name, su.name, t] = @variable(model, binary = true)
is_discharging[sc.name, su.name, t] = @variable(model, binary = true)
# Objective function terms ##### CHECK & FIXME
add_to_expression!(
model[:obj],
charge_rate[sc.name, su.name, t],
su.charge_cost[t] * sc.probability,
)
add_to_expression!(
model[:obj],
discharge_rate[sc.name, su.name, t],
su.discharge_cost[t] * sc.probability,
)
# Net injection
add_to_expression!(
net_injection[sc.name, su.bus.name, t],
discharge_rate[sc.name, su.name, t],
1.0,
)
add_to_expression!(
net_injection[sc.name, su.bus.name, t],
charge_rate[sc.name, su.name, t],
-1.0,
)
# Simultaneous charging and discharging
if !su.simultaneous_charge_and_discharge[t]
# Initialize the model dictionary
eq_simultaneous_charge_and_discharge =
_init(model, :eq_simultaneous_charge_and_discharge)
# Constraints
eq_simultaneous_charge_and_discharge[sc.name, su.name, t] =
@constraint(
model,
is_charging[sc.name, su.name, t] +
is_discharging[sc.name, su.name, t] <= 1.0
)
end
# Charge and discharge constraints
eq_min_charge_rate[sc.name, su.name, t] = @constraint(
model,
charge_rate[sc.name, su.name, t] >=
is_charging[sc.name, su.name, t] * su.min_charge_rate[t]
)
eq_max_charge_rate[sc.name, su.name, t] = @constraint(
model,
charge_rate[sc.name, su.name, t] <=
is_charging[sc.name, su.name, t] * su.max_charge_rate[t]
)
eq_min_discharge_rate[sc.name, su.name, t] = @constraint(
model,
discharge_rate[sc.name, su.name, t] >=
is_discharging[sc.name, su.name, t] * su.min_discharge_rate[t]
)
eq_max_discharge_rate[sc.name, su.name, t] = @constraint(
model,
discharge_rate[sc.name, su.name, t] <=
is_discharging[sc.name, su.name, t] * su.max_discharge_rate[t]
)
# Storage energy transition constraint
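# level[t] = (1 - loss) * level[t-1] + charge * Δt * η_charge - discharge * Δt / η_discharge,
# where Δt is the time step in hours and level[0] is the initial level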
prev_storage_level =
t == 1 ? su.initial_level : storage_level[sc.name, su.name, t-1]
eq_storage_transition[sc.name, su.name, t] = @constraint(
model,
storage_level[sc.name, su.name, t] ==
(1 - su.loss_factor[t]) * prev_storage_level +
charge_rate[sc.name, su.name, t] *
time_step *
su.charge_efficiency[t] -
discharge_rate[sc.name, su.name, t] * time_step /
su.discharge_efficiency[t]
)
# Storage ending level constraint
if t == sc.time
eq_ending_level[sc.name, su.name] = @constraint(
model,
su.min_ending_level <=
storage_level[sc.name, su.name, t] <=
su.max_ending_level
)
end
end
return
end

View File

@@ -1,71 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using .MIPLearn
using Suppressor
using JuMP
function _build_ucjl_model(instance, method)
if instance isa String
instance = UnitCommitment.read(instance)
end
model = UnitCommitment.build_model(
instance = instance,
optimizer = method.optimizer,
variable_names = true,
)
write_to_file(model, "/tmp/model.lp")
return JumpModel(model)
end
function _set_default_collectors!(method::MIPLearnMethod)
method.collectors = [BasicCollector()]
return
end
function _set_default_solver!(method::MIPLearnMethod)
KNN = MIPLearn.pyimport("sklearn.neighbors").KNeighborsClassifier
method.solver = LearningSolver(
components = [
MemorizingPrimalComponent(
clf = KNN(n_neighbors = 30),
extractor = H5FieldsExtractor(
instance_fields = ["static_var_obj_coeffs"],
),
constructor = MergeTopSolutions(30, [0.0, 1.0]),
action = FixVariables(),
),
],
)
return
end
function collect!(filenames::Vector, method::MIPLearnMethod)
build(x) = _build_ucjl_model(x, method)
if method.collectors === nothing
_set_default_collectors!(method)
end
for c in method.collectors
c.collect(filenames, build)
end
end
function fit!(filenames::Vector, method::MIPLearnMethod)
if method.solver === nothing
_set_default_solver!(method)
end
return method.solver.fit(filenames)
end
function optimize!(filename::AbstractString, method::MIPLearnMethod)
build(x) = _build_ucjl_model(x, method)
method.solver.optimize(filename, build)
return
end
function optimize!(instance::UnitCommitmentInstance, method::MIPLearnMethod)
model = _build_ucjl_model(instance, method)
method.solver.optimize(model)
return
end

View File

@@ -1,11 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using Suppressor
Base.@kwdef mutable struct MIPLearnMethod
optimizer::Any
collectors::Any = nothing
solver::Any = nothing
end

View File

@@ -1,230 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using MPI, Printf
using TimerOutputs
import JuMP
const to = TimerOutput()
function optimize!(model::JuMP.Model, method::ProgressiveHedging)::Nothing
mpi = MpiInfo(MPI.COMM_WORLD)
iterations = PHIterationInfo[]
consensus_vars = [var for var in all_variables(model) if is_binary(var)]
nvars = length(consensus_vars)
weights = ones(nvars)
if method.initial_weights !== nothing
weights = copy(method.initial_weights)
end
target = zeros(nvars)
if method.initial_target !== nothing
target = copy(method.initial_target)
end
params = PHSubProblemParams(
ρ = method.ρ,
λ = [method.λ for _ in 1:nvars],
target = target,
)
sp = PHSubProblem(model, model[:obj], consensus_vars, weights)
while true
iteration_time = @elapsed begin
solution = solve_subproblem(sp, params, method.inner_method)
MPI.Barrier(mpi.comm)
global_obj = compute_global_objective(mpi, solution)
target = compute_target(mpi, solution)
update_λ_and_residuals!(solution, params, target)
global_infeas = compute_global_infeasibility(solution, mpi)
global_residual = compute_global_residual(mpi, solution)
if has_numerical_issues(target)
break
end
end
total_elapsed_time =
compute_total_elapsed_time(iteration_time, iterations)
current_iteration = PHIterationInfo(
global_infeas = global_infeas,
global_obj = global_obj,
global_residual = global_residual,
iteration_number = length(iterations) + 1,
iteration_time = iteration_time,
sp_vals = solution.vals,
sp_obj = solution.obj,
target = target,
total_elapsed_time = total_elapsed_time,
)
push!(iterations, current_iteration)
print_progress(mpi, current_iteration, method.print_interval)
if should_stop(mpi, iterations, method.termination)
break
end
end
return
end
function compute_total_elapsed_time(
iteration_time::Float64,
iterations::Array{PHIterationInfo,1},
)::Float64
current_total_time = length(iterations) > 0 ? last(iterations).total_elapsed_time : 0.0
return current_total_time + iteration_time
end
function compute_global_objective(
mpi::MpiInfo,
s::PhSubProblemSolution,
)::Float64
global_obj = MPI.Allreduce(s.obj, MPI.SUM, mpi.comm)
global_obj /= mpi.nprocs
return global_obj
end
function compute_target(mpi::MpiInfo, s::PhSubProblemSolution)::Array{Float64,1}
sp_vals = s.vals
target = MPI.Allreduce(sp_vals, MPI.SUM, mpi.comm)
target = target / mpi.nprocs
return target
end
function compute_global_residual(mpi::MpiInfo, s::PhSubProblemSolution)::Float64
n_vars = length(s.vals)
local_residual_sum = abs.(s.residuals)
global_residual_sum = MPI.Allreduce(local_residual_sum, MPI.SUM, mpi.comm)
return sum(global_residual_sum) / n_vars
end
function compute_global_infeasibility(
solution::PhSubProblemSolution,
mpi::MpiInfo,
)::Float64
local_infeasibility = norm(solution.residuals)
global_infeas = MPI.Allreduce(local_infeasibility, MPI.SUM, mpi.comm)
return global_infeas
end
function solve_subproblem(
sp::PHSubProblem,
params::PHSubProblemParams,
method::SolutionMethod,
)::PhSubProblemSolution
G = length(sp.consensus_vars)
if norm(params.λ) < 1e-3
@objective(sp.mip, Min, sp.obj)
else
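# Augmented Lagrangian objective: the original cost plus a linear multiplier term
# and a quadratic proximal penalty pulling the consensus variables toward the target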
@objective(
sp.mip,
Min,
sp.obj +
sum(
sp.weights[g] *
params.λ[g] *
(sp.consensus_vars[g] - params.target[g]) for g in 1:G
) +
(params.ρ / 2) * sum(
sp.weights[g] * (sp.consensus_vars[g] - params.target[g])^2 for
g in 1:G
)
)
end
optimize!(sp.mip, method)
obj = objective_value(sp.mip)
sp_vals = value.(sp.consensus_vars)
return PhSubProblemSolution(obj = obj, vals = sp_vals, residuals = zeros(G))
end
function update_λ_and_residuals!(
solution::PhSubProblemSolution,
params::PHSubProblemParams,
target::Array{Float64,1},
)::Nothing
n_vars = length(solution.vals)
params.target = target
for n in 1:n_vars
solution.residuals[n] = solution.vals[n] - params.target[n]
params.λ[n] += params.ρ * solution.residuals[n]
end
end
function print_header(mpi::MpiInfo)::Nothing
if !mpi.root
return
end
@info "Solving via Progressive Hedging:"
@info @sprintf(
"%8s %20s %20s %14s %8s %8s",
"iter",
"obj",
"infeas",
"consensus",
"time-it",
"time"
)
end
function print_progress(
mpi::MpiInfo,
iteration::PHIterationInfo,
print_interval,
)::Nothing
if !mpi.root
return
end
if iteration.iteration_number % print_interval != 0
return
end
@info @sprintf(
"%8d %20.6e %20.6e %12.2f %% %8.2f %8.2f",
iteration.iteration_number,
iteration.global_obj,
iteration.global_infeas,
iteration.global_residual * 100,
iteration.iteration_time,
iteration.total_elapsed_time
)
end
function has_numerical_issues(target::Array{Float64,1})::Bool
if any(isnan, target)
@warn "Numerical issues detected. Stopping."
return true
end
return false
end
function should_stop(
mpi::MpiInfo,
iterations::Array{PHIterationInfo,1},
termination::PHTermination,
)::Bool
if length(iterations) >= termination.max_iterations
if mpi.root
@info "Iteration limit reached. Stopping."
end
return true
end
if length(iterations) < termination.min_iterations
return false
end
if last(iterations).total_elapsed_time > termination.max_time
if mpi.root
@info "Time limit reached. Stopping."
end
return true
end
curr_it = last(iterations)
prev_it = iterations[length(iterations)-1]
if curr_it.global_infeas < termination.min_feasibility
obj_change = abs(prev_it.global_obj - curr_it.global_obj)
if obj_change < termination.min_improvement
if mpi.root
@info "Feasibility limit reached. Stopping."
end
return true
end
end
return false
end

View File

@@ -1,18 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
function read(
paths::Vector{String},
::ProgressiveHedging,
)::UnitCommitmentInstance
comm = MPI.COMM_WORLD
mpi = MpiInfo(comm)
(length(paths) % mpi.nprocs == 0) || error(
"Number of processes $(mpi.nprocs) is not a divisor of $(length(paths))",
)
bundled_scenarios = length(paths) ÷ mpi.nprocs
sc_num_start = (mpi.rank - 1) * bundled_scenarios + 1
sc_num_end = mpi.rank * bundled_scenarios
return read(paths[sc_num_start:sc_num_end])
end

View File

@@ -1,83 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using MPI, DataStructures
const FIRST_STAGE_VARS = ["Is on", "Switch on", "Switch off"]
function solution(model::JuMP.Model, method::ProgressiveHedging)::OrderedDict
comm = MPI.COMM_WORLD
mpi = MpiInfo(comm)
sp_solution = UnitCommitment.solution(model)
gather_solution = OrderedDict()
for (solution_key, dict) in sp_solution
if solution_key !== "Spinning reserve (MW)" &&
solution_key ∉ FIRST_STAGE_VARS
push!(gather_solution, solution_key => OrderedDict())
for (gen_bus_key, values) in dict
global T = length(values)
receive_values =
MPI.UBuffer(Vector{Float64}(undef, T * mpi.nprocs), T)
MPI.Gather!(float.(values), receive_values, comm)
if mpi.root
push!(
gather_solution[solution_key],
gen_bus_key => receive_values.data,
)
end
end
end
end
push!(gather_solution, "Spinning reserve (MW)" => OrderedDict())
for (reserve_type, dict) in sp_solution["Spinning reserve (MW)"]
push!(
gather_solution["Spinning reserve (MW)"],
reserve_type => OrderedDict(),
)
for (gen_key, values) in dict
receive_values =
MPI.UBuffer(Vector{Float64}(undef, T * mpi.nprocs), T)
MPI.Gather!(float.(values), receive_values, comm)
if mpi.root
push!(
gather_solution["Spinning reserve (MW)"][reserve_type],
gen_key => receive_values.data,
)
end
end
end
aggregate_solution = OrderedDict()
if mpi.root
for first_stage_var in FIRST_STAGE_VARS
aggregate_solution[first_stage_var] = OrderedDict()
for gen_key in keys(sp_solution[first_stage_var])
aggregate_solution[first_stage_var][gen_key] =
sp_solution[first_stage_var][gen_key]
end
end
for i in 1:mpi.nprocs
push!(aggregate_solution, "s$i" => OrderedDict())
for (solution_key, solution_dict) in gather_solution
push!(aggregate_solution["s$i"], solution_key => OrderedDict())
if solution_key !== "Spinning reserve (MW)"
for (gen_bus_key, values) in solution_dict
aggregate_solution["s$i"][solution_key][gen_bus_key] =
gather_solution[solution_key][gen_bus_key][(i-1)*T+1:i*T]
end
else
for (reserve_name, reserve_dict) in solution_dict
push!(
aggregate_solution["s$i"][solution_key],
reserve_name => OrderedDict(),
)
for (gen_key, values) in reserve_dict
aggregate_solution["s$i"][solution_key][reserve_name][gen_key] =
gather_solution[solution_key][reserve_name][gen_key][(i-1)*T+1:i*T]
end
end
end
end
end
end
return aggregate_solution
end

View File

@@ -1,73 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using JuMP, MPI, TimerOutputs
Base.@kwdef mutable struct PHTermination
max_iterations::Int = 1000
max_time::Float64 = 14400.0
min_feasibility::Float64 = 1e-3
min_improvement::Float64 = 1e-3
min_iterations::Int = 2
end
Base.@kwdef mutable struct PHIterationInfo
global_infeas::Float64
global_obj::Float64
global_residual::Float64
iteration_number::Int
iteration_time::Float64
sp_vals::Array{Float64,1}
sp_obj::Float64
target::Array{Float64,1}
total_elapsed_time::Float64
end
Base.@kwdef mutable struct ProgressiveHedging <: SolutionMethod
initial_weights::Union{Vector{Float64},Nothing} = nothing
initial_target::Union{Vector{Float64},Nothing} = nothing
ρ::Float64 = 1.0
λ::Float64 = 0.0
print_interval::Int = 1
termination::PHTermination = PHTermination()
inner_method::SolutionMethod = XavQiuWanThi2019.Method()
end
struct SpResult
obj::Float64
vals::Array{Float64,1}
end
Base.@kwdef mutable struct PHSubProblem
mip::JuMP.Model
obj::AffExpr
consensus_vars::Array{VariableRef,1}
weights::Array{Float64,1}
end
Base.@kwdef struct PhSubProblemSolution
obj::Float64
vals::Array{Float64,1}
residuals::Array{Float64,1}
end
Base.@kwdef mutable struct PHSubProblemParams
ρ::Float64
λ::Array{Float64,1}
target::Array{Float64,1}
end
struct MpiInfo
comm::Any
rank::Int
root::Bool
nprocs::Int
function MpiInfo(comm)
rank = MPI.Comm_rank(comm) + 1
is_root = (rank == 1)
nprocs = MPI.Comm_size(comm)
return new(comm, rank, is_root, nprocs)
end
end

View File

@@ -1,259 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
"""
optimize!(
instance::UnitCommitmentInstance,
method::TimeDecomposition;
optimizer,
after_build = nothing,
after_optimize = nothing,
)::OrderedDict
Solve the given unit commitment instance with time decomposition.
Each sub-problem covers a time window of length `method.time_window`, and the window
advances by `method.time_increment` time steps between sub-problems.
Arguments
---------
- `instance`:
the UnitCommitment instance.
- `method`:
the `TimeDecomposition` method.
- `optimizer`:
the optimizer for solving the problem.
- `after_build`:
a user-defined function that allows modifying the model after building,
must have 2 arguments `model` and `instance` in order.
- `after_optimize`:
a user-defined function that allows handling additional steps after optimizing,
must have 3 arguments `solution`, `model` and `instance` in order.
Examples
--------
```julia
using UnitCommitment, JuMP, Cbc, HiGHS
import UnitCommitment:
TimeDecomposition,
ConventionalLMP,
XavQiuWanThi2019,
Formulation
# specifying the after_build and after_optimize functions
function after_build(model, instance)
@constraint(
model,
model[:is_on]["g3", 1] + model[:is_on]["g4", 1] <= 1,
)
end
lmps = []
function after_optimize(solution, model, instance)
lmp = UnitCommitment.compute_lmp(
model,
ConventionalLMP(),
optimizer = HiGHS.Optimizer,
)
return push!(lmps, lmp)
end
# assume the instance is given as a 120h problem
instance = UnitCommitment.read("instance.json")
solution = UnitCommitment.optimize!(
instance,
TimeDecomposition(
time_window = 36, # solve 36h problems
time_increment = 24, # advance by 24h each time
inner_method = XavQiuWanThi2019.Method(),
formulation = Formulation(),
),
optimizer = Cbc.Optimizer,
after_build = after_build,
after_optimize = after_optimize,
)
"""
function optimize!(
instance::UnitCommitmentInstance,
method::TimeDecomposition;
optimizer,
after_build = nothing,
after_optimize = nothing,
)::OrderedDict
# get instance total length
T = instance.time
solution = OrderedDict()
if length(instance.scenarios) > 1
for sc in instance.scenarios
solution[sc.name] = OrderedDict()
end
end
# for each iteration, time increment by method.time_increment
for t_start in 1:method.time_increment:T
t_end = t_start + method.time_window - 1
# if t_end exceed total T
t_end = t_end > T ? T : t_end
# slice the model
@info "Solving the sub-problem of time $t_start to $t_end..."
sub_instance = UnitCommitment.slice(instance, t_start:t_end)
# build and optimize the model
sub_model = UnitCommitment.build_model(
instance = sub_instance,
optimizer = optimizer,
formulation = method.formulation,
)
if after_build !== nothing
@info "Calling after build..."
after_build(sub_model, sub_instance)
end
UnitCommitment.optimize!(sub_model, method.inner_method)
# get the result of each time period
sub_solution = UnitCommitment.solution(sub_model)
if after_optimize !== nothing
@info "Calling after optimize..."
after_optimize(sub_solution, sub_model, sub_instance)
end
# merge solution
if length(instance.scenarios) == 1
_update_solution!(solution, sub_solution, method.time_increment)
else
for sc in instance.scenarios
_update_solution!(
solution[sc.name],
sub_solution[sc.name],
method.time_increment,
)
end
end
# set the initial status for the next sub-problem
_set_initial_status!(instance, solution, method.time_increment)
end
return solution
end
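As a quick sanity check of the window arithmetic above, here is a small sketch (with made-up horizon values, mirroring the docstring example) that reproduces the `(t_start, t_end)` pairs the loop visits; the last window is truncated at the horizon:

```julia
# Hypothetical example: a 120-period instance, 36-period window, advanced by 24 periods.
T, time_window, time_increment = 120, 36, 24
windows = [(t_start, min(t_start + time_window - 1, T)) for t_start in 1:time_increment:T]
# windows == [(1, 36), (25, 60), (49, 84), (73, 108), (97, 120)]
```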
"""
_set_initial_status!(
instance::UnitCommitmentInstance,
solution::OrderedDict,
time_increment::Int,
)
Set the thermal units' initial power levels and statuses based on the last `time_increment`
time slots recorded in the solution dictionary.
"""
function _set_initial_status!(
instance::UnitCommitmentInstance,
solution::OrderedDict,
time_increment::Int,
)
for sc in instance.scenarios
for thermal_unit in sc.thermal_units
if length(instance.scenarios) == 1
prod = solution["Thermal production (MW)"][thermal_unit.name]
is_on = solution["Is on"][thermal_unit.name]
else
prod =
solution[sc.name]["Thermal production (MW)"][thermal_unit.name]
is_on = solution[sc.name]["Is on"][thermal_unit.name]
end
thermal_unit.initial_power = prod[end]
thermal_unit.initial_status = _determine_initial_status(
thermal_unit.initial_status,
is_on[end-time_increment+1:end],
)
end
end
end
"""
_determine_initial_status(
prev_initial_status::Union{Float64,Int},
status_sequence::Vector{Float64},
)::Union{Float64,Int}
Determine a thermal unit's initial status from its previous initial status and the
sequence of on/off statuses over the most recent operating periods.
"""
function _determine_initial_status(
prev_initial_status::Union{Float64,Int},
status_sequence::Vector{Float64},
)::Union{Float64,Int}
# initialize the two flags
on_status = prev_initial_status
off_status = prev_initial_status
# scan the status sequence
# at each time step, if the unit is on: reset off_status and increment on_status
# (setting it to 1.0 if it was negative)
# if the unit is off: reset on_status and decrement off_status
# (setting it to -1.0 if it was positive)
for status in status_sequence
if status == 1.0
on_status = on_status < 0.0 ? 1.0 : on_status + 1.0
off_status = 0.0
else
on_status = 0.0
off_status = off_status > 0.0 ? -1.0 : off_status - 1.0
end
end
# only one of them has non-zero value
return on_status + off_status
end
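A short worked example of this rule (values chosen for illustration, consistent with the tests later in this diff): starting from a unit that has been offline for 100 hours, two on-periods followed by two off-periods yield an initial status of -2; a unit already online for 100 hours that stays on for three more periods ends at 103.

```julia
# Assumes UnitCommitment is loaded; _determine_initial_status is an internal helper.
using UnitCommitment
UnitCommitment._determine_initial_status(-100, [1.0, 1.0, 0.0, 0.0])  # returns -2.0
UnitCommitment._determine_initial_status(100, [1.0, 1.0, 1.0])        # returns 103.0
```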
"""
_update_solution!(
solution::OrderedDict,
sub_solution::OrderedDict,
time_increment::Int,
)
Update the solution (of each scenario) by appending the first `time_increment`
time slots of the newly generated sub-solution to the end of the final solution dictionary.
The function traverses the dictionary keys until it finds a vector and then performs
the concatenation. For now, it is hardcoded to traverse at most 3 layers of nesting
before reaching a vector object.
"""
function _update_solution!(
solution::OrderedDict,
sub_solution::OrderedDict,
time_increment::Int,
)
# the solution has at most 3 layers
for (l1_k, l1_v) in sub_solution
for (l2_k, l2_v) in l1_v
if l2_v isa Array
# slice the sub_solution
values_of_interest = l2_v[1:time_increment]
sub_solution[l1_k][l2_k] = values_of_interest
# append to the solution
if !isempty(solution)
append!(solution[l1_k][l2_k], values_of_interest)
end
elseif l2_v isa OrderedDict
for (l3_k, l3_v) in l2_v
# slice the sub_solution
values_of_interest = l3_v[1:time_increment]
sub_solution[l1_k][l2_k][l3_k] = values_of_interest
# append to the solution
if !isempty(solution)
append!(solution[l1_k][l2_k][l3_k], values_of_interest)
end
end
end
end
end
# if solution is never initialized, deep copy the sliced sub_solution
if isempty(solution)
merge!(solution, sub_solution)
end
end
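To illustrate the nesting handled above, here is a small sketch with made-up values: `"Is on"` entries are two layers deep (quantity → unit → vector), while reserve entries are three layers deep (quantity → reserve → unit → vector); on the first call the empty solution is simply filled with the sliced sub-solution.

```julia
using DataStructures, UnitCommitment

sub = OrderedDict(
    "Is on" => OrderedDict("g1" => [1.0, 1.0, 0.0, 0.0]),
    "Spinning reserve (MW)" =>
        OrderedDict("r1" => OrderedDict("g1" => [5.0, 5.0, 0.0, 0.0])),
)
sol = OrderedDict()
UnitCommitment._update_solution!(sol, sub, 2)  # keep only the first 2 periods
# sol["Is on"]["g1"] == [1.0, 1.0]
# sol["Spinning reserve (MW)"]["r1"]["g1"] == [5.0, 5.0]
```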

@@ -1,35 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import ..SolutionMethod
import ..Formulation
"""
mutable struct TimeDecomposition <: SolutionMethod
time_window::Int
time_increment::Int
inner_method::SolutionMethod = XavQiuWanThi2019.Method()
formulation::Formulation = Formulation()
end
Time decomposition method that solves a problem with a moving time window.
Fields
------
- `time_window`:
the number of time periods covered by each sub-problem.
- `time_increment`:
the number of time periods by which the window advances to the next sub-problem.
- `inner_method`:
method to solve each sub-problem.
- `formulation`:
problem formulation.
"""
Base.@kwdef mutable struct TimeDecomposition <: SolutionMethod
time_window::Int
time_increment::Int
inner_method::SolutionMethod = XavQiuWanThi2019.Method()
formulation::Formulation = Formulation()
end

@@ -103,30 +103,6 @@ function solution(model::JuMP.Model)::OrderedDict
] for pu in sc.profiled_units
)
end
if !isempty(sc.storage_units)
sol[sc.name]["Storage level (MWh)"] =
timeseries(model[:storage_level], sc.storage_units, sc = sc)
sol[sc.name]["Is charging"] =
timeseries(model[:is_charging], sc.storage_units, sc = sc)
sol[sc.name]["Storage charging rates (MW)"] =
timeseries(model[:charge_rate], sc.storage_units, sc = sc)
sol[sc.name]["Storage charging cost (\$)"] = OrderedDict(
su.name => [
value(model[:charge_rate][sc.name, su.name, t]) *
su.charge_cost[t] for t in 1:instance.time
] for su in sc.storage_units
)
sol[sc.name]["Is discharging"] =
timeseries(model[:is_discharging], sc.storage_units, sc = sc)
sol[sc.name]["Storage discharging rates (MW)"] =
timeseries(model[:discharge_rate], sc.storage_units, sc = sc)
sol[sc.name]["Storage discharging cost (\$)"] = OrderedDict(
su.name => [
value(model[:discharge_rate][sc.name, su.name, t]) *
su.discharge_cost[t] for t in 1:instance.time
] for su in sc.storage_units
)
end
sol[sc.name]["Spinning reserve (MW)"] = OrderedDict(
r.name => OrderedDict(
g.name => [

@@ -5,31 +5,13 @@
using JuMP
"""
generate_initial_conditions!(instance, optimizer)
generate_initial_conditions!(sc, optimizer)
Generates feasible initial conditions for the given instance, by constructing
Generates feasible initial conditions for the given scenario, by constructing
and solving a single-period mixed-integer optimization problem, using the given
optimizer. The instance is modified in-place.
optimizer. The scenario is modified in-place.
"""
function generate_initial_conditions!(
instance::UnitCommitmentInstance,
optimizer,
)::Nothing
# Process first scenario
_generate_initial_conditions!(instance.scenarios[1], optimizer)
# Copy initial conditions to remaining scenarios
for (si, sc) in enumerate(instance.scenarios)
si > 1 || continue
for (gi, g) in sc.thermal_units
g_ref = instance.scenarios[1].thermal_units[gi]
g.initial_power = g_ref.initial_power
g.initial_status = g_ref.initial_status
end
end
end
function _generate_initial_conditions!(
sc::UnitCommitmentScenario,
optimizer,
)::Nothing

@@ -137,11 +137,6 @@ function _randomize_costs(
α = rand(rng, distribution)
pu.cost *= α
end
for su in sc.storage_units
α = rand(rng, distribution)
su.charge_cost *= α
su.discharge_cost *= α
end
return
end

@@ -56,21 +56,6 @@ function slice(
ps.demand = ps.demand[range]
ps.revenue = ps.revenue[range]
end
for su in sc.storage_units
su.min_level = su.min_level[range]
su.max_level = su.max_level[range]
su.simultaneous_charge_and_discharge =
su.simultaneous_charge_and_discharge[range]
su.charge_cost = su.charge_cost[range]
su.discharge_cost = su.discharge_cost[range]
su.charge_efficiency = su.charge_efficiency[range]
su.discharge_efficiency = su.discharge_efficiency[range]
su.loss_factor = su.loss_factor[range]
su.min_charge_rate = su.min_charge_rate[range]
su.max_charge_rate = su.max_charge_rate[range]
su.min_discharge_rate = su.min_discharge_rate[range]
su.max_discharge_rate = su.max_discharge_rate[range]
end
end
return modified
end

@@ -334,195 +334,6 @@ function _validate_units(instance::UnitCommitmentInstance, solution; tol = 0.01)
end
end
end
for su in sc.storage_units
storage_level = solution[sc.name]["Storage level (MWh)"][su.name]
charge_rate =
solution[sc.name]["Storage charging rates (MW)"][su.name]
discharge_rate =
solution[sc.name]["Storage discharging rates (MW)"][su.name]
actual_charge_cost =
solution[sc.name]["Storage charging cost (\$)"][su.name]
actual_discharge_cost =
solution[sc.name]["Storage discharging cost (\$)"][su.name]
is_charging = bin(solution[sc.name]["Is charging"][su.name])
is_discharging = bin(solution[sc.name]["Is discharging"][su.name])
# time in hours
time_step = sc.time_step / 60
for t in 1:instance.time
# Unit must store at least its minimum level
if storage_level[t] < su.min_level[t] - tol
@error @sprintf(
"Storage unit %s stores below its minimum level at time %d (%.2f < %.2f)",
su.name,
t,
storage_level[t],
su.min_level[t]
)
err_count += 1
end
# Unit must store at most its maximum level
if storage_level[t] > su.max_level[t] + tol
@error @sprintf(
"Storage unit %s stores above its maximum level at time %d (%.2f > %.2f)",
su.name,
t,
storage_level[t],
su.max_level[t]
)
err_count += 1
end
if t == instance.time
# Unit must store at least its minimum level at last time period
if storage_level[t] < su.min_ending_level - tol
@error @sprintf(
"Storage unit %s stores below its minimum ending level (%.2f < %.2f)",
su.name,
storage_level[t],
su.min_ending_level
)
err_count += 1
end
# Unit must store at most its maximum level at last time period
if storage_level[t] > su.max_ending_level + tol
@error @sprintf(
"Storage unit %s stores above its maximum ending level (%.2f > %.2f)",
su.name,
storage_level[t],
su.max_ending_level
)
err_count += 1
end
end
# Unit must follow the energy transition constraint
prev_level = t == 1 ? su.initial_level : storage_level[t-1]
current_level =
(1 - su.loss_factor[t]) * prev_level +
time_step * (
charge_rate[t] * su.charge_efficiency[t] -
discharge_rate[t] / su.discharge_efficiency[t]
)
if abs(storage_level[t] - current_level) > tol
@error @sprintf(
"Storage unit %s has unexpected level at time %d (%.2f should be %.2f)",
su.name,
t,
storage_level[t],
current_level
)
err_count += 1
end
# Unit cannot simultaneously charge and discharge unless that is allowed
if !su.simultaneous_charge_and_discharge[t] &&
is_charging[t] &&
is_discharging[t]
@error @sprintf(
"Storage unit %s is charging and discharging simultaneous at time %d",
su.name,
t
)
err_count += 1
end
# Unit must charge at least its minimum rate
if is_charging[t] &&
(charge_rate[t] < su.min_charge_rate[t] - tol)
@error @sprintf(
"Storage unit %s charges below its minimum limit at time %d (%.2f < %.2f)",
su.name,
t,
charge_rate[t],
su.min_charge_rate[t]
)
err_count += 1
end
# Unit must charge at most its maximum rate
if is_charging[t] &&
(charge_rate[t] > su.max_charge_rate[t] + tol)
@error @sprintf(
"Storage unit %s charges above its maximum limit at time %d (%.2f > %.2f)",
unit.name,
t,
charge_rate[t],
su.max_charge_rate[t]
)
err_count += 1
end
# Unit must have zero charge when it is not charging
if !is_charging[t] && (charge_rate[t] > tol)
@error @sprintf(
"Storage unit %s charges power at time %d while not charging (%.2f > 0)",
su.name,
t,
charge_rate[t]
)
err_count += 1
end
# Unit must discharge at least its minimum rate
if is_discharging[t] &&
(discharge_rate[t] < su.min_discharge_rate[t] - tol)
@error @sprintf(
"Storage unit %s discharges below its minimum limit at time %d (%.2f < %.2f)",
su.name,
t,
discharge_rate[t],
su.min_discharge_rate[t]
)
err_count += 1
end
# Unit must discharge at most its maximum rate
if is_discharging[t] &&
(discharge_rate[t] > su.max_discharge_rate[t] + tol)
@error @sprintf(
"Storage unit %s discharges above its maximum limit at time %d (%.2f > %.2f)",
su.name,
t,
discharge_rate[t],
su.max_discharge_rate[t]
)
err_count += 1
end
# Unit must have zero discharge when it is not discharging
if !is_discharging[t] && (discharge_rate[t] > tol)
@error @sprintf(
"Storage unit %s discharges power at time %d while not discharging (%.2f > 0)",
su.name,
t,
discharge_rate[t]
)
err_count += 1
end
# Compute storage costs
charge_cost = su.charge_cost[t] * charge_rate[t]
discharge_cost = su.discharge_cost[t] * discharge_rate[t]
# Compare costs
if abs(actual_charge_cost[t] - charge_cost) > tol
@error @sprintf(
"Storage unit %s has unexpected charge cost at time %d (%.2f should be %.2f)",
su.name,
t,
actual_charge_cost[t],
charge_cost
)
err_count += 1
end
if abs(actual_discharge_cost[t] - discharge_cost) > tol
@error @sprintf(
"Storage unit %s has unexpected discharge cost at time %d (%.2f should be %.2f)",
su.name,
t,
actual_discharge_cost[t],
discharge_cost
)
err_count += 1
end
end
end
end
return err_count
end
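The energy-transition check above is a one-line balance; a small numeric sketch of the same formula (values borrowed from the `su2` fixture shown later in this diff, with a 1-hour time step):

```julia
# level[t] = (1 - loss) * level[t-1] + Δt * (charge * η_c - discharge / η_d)
prev_level, loss, Δt = 70.0, 0.01, 1.0   # MWh, per-period loss factor, hours
charge, η_c = 5.0, 0.8                   # MW charged, charge efficiency
discharge, η_d = 2.0, 0.85               # MW discharged, discharge efficiency
level = (1 - loss) * prev_level + Δt * (charge * η_c - discharge / η_d)
# level ≈ 70.95 MWh
```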
@@ -535,8 +346,6 @@ function _validate_reserve_and_demand(instance, solution, tol = 0.01)
fixed_load = sum(b.load[t] for b in sc.buses)
ps_load = 0
production = 0
storage_charge = 0
storage_discharge = 0
if length(sc.price_sensitive_loads) > 0
ps_load = sum(
solution[sc.name]["Price-sensitive loads (MW)"][ps.name][t]
@@ -555,38 +364,23 @@ function _validate_reserve_and_demand(instance, solution, tol = 0.01)
for pu in sc.profiled_units
)
end
if length(sc.storage_units) > 0
storage_charge += sum(
solution[sc.name]["Storage charging rates (MW)"][su.name][t]
for su in sc.storage_units
)
storage_discharge += sum(
solution[sc.name]["Storage discharging rates (MW)"][su.name][t]
for su in sc.storage_units
)
end
if "Load curtail (MW)" in keys(solution)
load_curtail = sum(
solution[sc.name]["Load curtail (MW)"][b.name][t] for
b in sc.buses
)
end
balance =
fixed_load - load_curtail - production +
ps_load +
storage_charge - storage_discharge
balance = fixed_load - load_curtail - production + ps_load
# Verify that production equals demand
if abs(balance) > tol
@error @sprintf(
"Non-zero power balance at time %d (%.2f + %.2f - %.2f - %.2f + %.2f - %.2f != 0)",
"Non-zero power balance at time %d (%.2f + %.2f - %.2f - %.2f != 0)",
t,
fixed_load,
ps_load,
load_curtail,
production,
storage_charge,
storage_discharge,
)
err_count += 1
end

@@ -13,8 +13,6 @@ JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MIPLearn = "2b1277c3-b477-4c49-a15e-7ba350325c68"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"

Binary files changed but not shown (8 files).
@@ -1,7 +1,6 @@
module UnitCommitmentT
using JuliaFormatter
using MIPLearn
using UnitCommitment
using Test
@@ -13,18 +12,12 @@ include("model/formulations_test.jl")
include("solution/methods/XavQiuWanThi19/filter_test.jl")
include("solution/methods/XavQiuWanThi19/find_test.jl")
include("solution/methods/XavQiuWanThi19/sensitivity_test.jl")
include("solution/methods/ProgressiveHedging/usage_test.jl")
include("solution/methods/TimeDecomposition/initial_status_test.jl")
include("solution/methods/TimeDecomposition/optimize_test.jl")
include("solution/methods/TimeDecomposition/update_solution_test.jl")
include("solution/methods/MIPLearn/usage_test.jl")
include("transform/initcond_test.jl")
include("transform/slice_test.jl")
include("transform/randomize/XavQiuAhm2021_test.jl")
include("validation/repair_test.jl")
include("lmp/conventional_test.jl")
include("lmp/aelmp_test.jl")
include("market/market_test.jl")
basedir = dirname(@__FILE__)
@@ -44,19 +37,12 @@ function runtests()
solution_methods_XavQiuWanThi19_filter_test()
solution_methods_XavQiuWanThi19_find_test()
solution_methods_XavQiuWanThi19_sensitivity_test()
solution_methods_ProgressiveHedging_usage_test()
solution_methods_TimeDecomposition_initial_status_test()
solution_methods_TimeDecomposition_optimize_test()
solution_methods_TimeDecomposition_update_solution_test()
solution_methods_MIPLearn_usage_test()
transform_initcond_test()
transform_slice_test()
transform_randomize_XavQiuAhm2021_test()
validation_repair_test()
lmp_conventional_test()
lmp_aelmp_test()
simple_market_test()
stochastic_market_test()
end
return
end

@@ -21,11 +21,11 @@ function instance_read_test()
@test length(sc.contingencies) == 19
@test length(sc.price_sensitive_loads) == 1
@test instance.time == 4
@test sc.time_step == 60
@test sc.lines[5].name == "l5"
@test sc.lines[5].source.name == "b2"
@test sc.lines[5].target.name == "b5"
@test sc.lines[5].reactance ≈ 0.17388
@test sc.lines[5].susceptance ≈ 10.037550333
@test sc.lines[5].normal_flow_limit == [1e8 for t in 1:4]
@test sc.lines[5].emergency_flow_limit == [1e8 for t in 1:4]
@@ -35,6 +35,7 @@ function instance_read_test()
@test sc.lines[1].name == "l1"
@test sc.lines[1].source.name == "b1"
@test sc.lines[1].target.name == "b2"
@test sc.lines[1].reactance ≈ 0.059170
@test sc.lines[1].susceptance ≈ 29.496860773945
@test sc.lines[1].normal_flow_limit == [300.0 for t in 1:4]
@test sc.lines[1].emergency_flow_limit == [400.0 for t in 1:4]
@@ -137,20 +138,20 @@ function instance_read_test()
sc = instance.scenarios[1]
@test length(sc.profiled_units) == 2
pu1 = sc.profiled_units[1]
@test pu1.name == "g7"
@test pu1.bus.name == "b4"
@test pu1.cost == [100.0 for t in 1:4]
@test pu1.min_power == [60.0 for t in 1:4]
@test pu1.max_power == [100.0 for t in 1:4]
first_pu = sc.profiled_units[1]
@test first_pu.name == "g7"
@test first_pu.bus.name == "b4"
@test first_pu.cost == [100.0 for t in 1:4]
@test first_pu.min_power == [60.0 for t in 1:4]
@test first_pu.max_power == [100.0 for t in 1:4]
@test sc.profiled_units_by_name["g7"].name == "g7"
pu2 = sc.profiled_units[2]
@test pu2.name == "g8"
@test pu2.bus.name == "b5"
@test pu2.cost == [50.0 for t in 1:4]
@test pu2.min_power == [0.0 for t in 1:4]
@test pu2.max_power == [120.0 for t in 1:4]
second_pu = sc.profiled_units[2]
@test second_pu.name == "g8"
@test second_pu.bus.name == "b5"
@test second_pu.cost == [50.0 for t in 1:4]
@test second_pu.min_power == [0.0 for t in 1:4]
@test second_pu.max_power == [120.0 for t in 1:4]
@test sc.profiled_units_by_name["g8"].name == "g8"
end
@@ -164,64 +165,4 @@ function instance_read_test()
@test sc.thermal_units[6].commitment_status ==
[false, nothing, true, nothing]
end
@testset "read_benchmark storage" begin
instance = UnitCommitment.read(fixture("case14-storage.json.gz"))
sc = instance.scenarios[1]
@test length(sc.storage_units) == 4
su1 = sc.storage_units[1]
@test su1.name == "su1"
@test su1.bus.name == "b2"
@test su1.min_level == [0.0 for t in 1:4]
@test su1.max_level == [100.0 for t in 1:4]
@test su1.simultaneous_charge_and_discharge == [true for t in 1:4]
@test su1.charge_cost == [2.0 for t in 1:4]
@test su1.discharge_cost == [2.5 for t in 1:4]
@test su1.charge_efficiency == [1.0 for t in 1:4]
@test su1.discharge_efficiency == [1.0 for t in 1:4]
@test su1.loss_factor == [0.0 for t in 1:4]
@test su1.min_charge_rate == [0.0 for t in 1:4]
@test su1.max_charge_rate == [10.0 for t in 1:4]
@test su1.min_discharge_rate == [0.0 for t in 1:4]
@test su1.max_discharge_rate == [8.0 for t in 1:4]
@test su1.initial_level == 0.0
@test su1.min_ending_level == 0.0
@test su1.max_ending_level == 100.0
@test sc.storage_units_by_name["su1"].name == "su1"
su2 = sc.storage_units[2]
@test su2.name == "su2"
@test su2.bus.name == "b2"
@test su2.min_level == [10.0 for t in 1:4]
@test su2.simultaneous_charge_and_discharge == [false for t in 1:4]
@test su2.charge_cost == [3.0 for t in 1:4]
@test su2.discharge_cost == [3.5 for t in 1:4]
@test su2.charge_efficiency == [0.8 for t in 1:4]
@test su2.discharge_efficiency == [0.85 for t in 1:4]
@test su2.loss_factor == [0.01 for t in 1:4]
@test su2.min_charge_rate == [5.0 for t in 1:4]
@test su2.min_discharge_rate == [2.0 for t in 1:4]
@test su2.initial_level == 70.0
@test su2.min_ending_level == 80.0
@test su2.max_ending_level == 85.0
@test sc.storage_units_by_name["su2"].name == "su2"
su3 = sc.storage_units[3]
@test su3.bus.name == "b9"
@test su3.min_level == [10.0, 11.0, 12.0, 13.0]
@test su3.max_level == [100.0, 110.0, 120.0, 130.0]
@test su3.charge_cost == [2.0, 2.1, 2.2, 2.3]
@test su3.discharge_cost == [1.0, 1.1, 1.2, 1.3]
@test su3.charge_efficiency == [0.8, 0.81, 0.82, 0.82]
@test su3.discharge_efficiency == [0.85, 0.86, 0.87, 0.88]
@test su3.min_charge_rate == [5.0, 5.1, 5.2, 5.3]
@test su3.max_charge_rate == [10.0, 10.1, 10.2, 10.3]
@test su3.min_discharge_rate == [4.0, 4.1, 4.2, 4.3]
@test su3.max_discharge_rate == [8.0, 8.1, 8.2, 8.3]
su4 = sc.storage_units[4]
@test su4.simultaneous_charge_and_discharge ==
[false, false, true, true]
end
end

@@ -12,10 +12,7 @@ function lmp_aelmp_test()
instance = UnitCommitment.read(path)
model = UnitCommitment.build_model(
instance = instance,
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
optimizer = Cbc.Optimizer,
variable_names = true,
)
JuMP.set_silent(model)
@@ -25,10 +22,7 @@ function lmp_aelmp_test()
aelmp_1 = UnitCommitment.compute_lmp(
model,
AELMP(),
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
optimizer = HiGHS.Optimizer,
)
@test aelmp_1["s1", "B1", 1] ≈ 231.7 atol = 0.1
@@ -39,10 +33,7 @@ function lmp_aelmp_test()
allow_offline_participation = false,
consider_startup_costs = true,
),
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
optimizer = HiGHS.Optimizer,
)
@test aelmp_2["s1", "B1", 1] ≈ 274.3 atol = 0.1
end

@@ -3,12 +3,13 @@
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, Cbc, HiGHS, JuMP
import UnitCommitment: ConventionalLMP
function solve_conventional_testcase(path::String)
instance = UnitCommitment.read(path)
model = UnitCommitment.build_model(
instance = instance,
optimizer = optimizer_with_attributes(Cbc.Optimizer, "logLevel" => 0),
optimizer = Cbc.Optimizer,
variable_names = true,
)
JuMP.set_silent(model)
@@ -16,10 +17,7 @@ function solve_conventional_testcase(path::String)
lmp = UnitCommitment.compute_lmp(
model,
ConventionalLMP(),
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
optimizer = HiGHS.Optimizer,
)
return lmp
end

@@ -1,151 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, Cbc, HiGHS, JuMP
import UnitCommitment: MarketSettings
function simple_market_test()
@testset "da-to-rt simple market" begin
da_path = fixture("market_da_simple.json.gz")
rt_paths = [
fixture("market_rt1_simple.json.gz"),
fixture("market_rt2_simple.json.gz"),
fixture("market_rt3_simple.json.gz"),
fixture("market_rt4_simple.json.gz"),
]
# solve market with default setting
solution = UnitCommitment.solve_market(
da_path,
rt_paths,
MarketSettings(), # keep everything default
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
lp_optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
)
# the commitment status must agree with DA market
da_solution = solution["Day-ahead market"]
@test da_solution["Is on"]["GenY"] == [0.0, 1.0]
@test da_solution["Locational marginal price"][("s1", "B1", 1)] == 50.0
@test da_solution["Locational marginal price"][("s1", "B1", 2)] == 56.0
rt_solution = solution["Real-time markets"]
@test length(rt_solution) == 4
@test rt_solution[rt_paths[1]]["Is on"]["GenY"] == [0.0, 0.0]
@test rt_solution[rt_paths[2]]["Is on"]["GenY"] == [0.0, 1.0]
@test rt_solution[rt_paths[3]]["Is on"]["GenY"] == [1.0, 1.0]
@test rt_solution[rt_paths[4]]["Is on"]["GenY"] == [1.0]
@test length(rt_solution[rt_paths[1]]["Locational marginal price"]) == 2
@test length(rt_solution[rt_paths[2]]["Locational marginal price"]) == 2
@test length(rt_solution[rt_paths[3]]["Locational marginal price"]) == 2
@test length(rt_solution[rt_paths[4]]["Locational marginal price"]) == 1
# solve market with no lmp method
solution_no_lmp = UnitCommitment.solve_market(
da_path,
rt_paths,
MarketSettings(lmp_method = nothing), # no lmp
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
)
# the commitment status must agree with DA market
da_solution = solution_no_lmp["Day-ahead market"]
@test haskey(da_solution, "Locational marginal price") == false
rt_solution = solution_no_lmp["Real-time markets"]
@test haskey(rt_solution, "Locational marginal price") == false
end
end
function stochastic_market_test()
@testset "da-to-rt stochastic market" begin
da_path = [
fixture("market_da_simple.json.gz"),
fixture("market_da_scenario.json.gz"),
]
rt_paths = [
fixture("market_rt1_simple.json.gz"),
fixture("market_rt2_simple.json.gz"),
fixture("market_rt3_simple.json.gz"),
fixture("market_rt4_simple.json.gz"),
]
# after build and after optimize
function after_build(model, instance)
@constraint(model, model[:is_on]["GenY", 1] == 1,)
end
lmps_da = []
lmps_rt = []
function after_optimize_da(solution, model, instance)
lmp = UnitCommitment.compute_lmp(
model,
ConventionalLMP(),
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
)
return push!(lmps_da, lmp)
end
function after_optimize_rt(solution, model, instance)
lmp = UnitCommitment.compute_lmp(
model,
ConventionalLMP(),
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
)
return push!(lmps_rt, lmp)
end
# solve the stochastic market with callbacks
solution = UnitCommitment.solve_market(
da_path,
rt_paths,
MarketSettings(), # keep everything default
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
lp_optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
after_build_da = after_build,
after_optimize_da = after_optimize_da,
after_optimize_rt = after_optimize_rt,
)
# the commitment status must agree with DA market
da_solution_sp = solution["Day-ahead market"]["market_da_simple"]
da_solution_sc = solution["Day-ahead market"]["market_da_scenario"]
@test da_solution_sc["Is on"]["GenY"] == [1.0, 1.0]
@test da_solution_sp["Locational marginal price"][(
"market_da_simple",
"B1",
1,
)] == 25.0
@test da_solution_sc["Locational marginal price"][(
"market_da_scenario",
"B1",
2,
)] == 0.0
rt_solution = solution["Real-time markets"]
@test rt_solution[rt_paths[1]]["Is on"]["GenY"] == [1.0, 1.0]
@test rt_solution[rt_paths[2]]["Is on"]["GenY"] == [1.0, 1.0]
@test rt_solution[rt_paths[3]]["Is on"]["GenY"] == [1.0, 1.0]
@test rt_solution[rt_paths[4]]["Is on"]["GenY"] == [1.0]
@test length(lmps_rt) == 4
end
end

@@ -1,20 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using HiGHS
function solution_methods_MIPLearn_usage_test()
dirname = mktempdir()
cp(fixture("case14.json.gz"), "$dirname/case14.json.gz")
train_data = ["$dirname/case14.json.gz"]
method = UnitCommitment.MIPLearnMethod(optimizer = HiGHS.Optimizer)
UnitCommitment.collect!(train_data, method)
UnitCommitment.fit!(train_data, method)
UnitCommitment.optimize!(train_data[1], method)
instance = UnitCommitment.read(train_data[1])
UnitCommitment.optimize!(instance, method)
return
end

@@ -1,40 +0,0 @@
using HiGHS
using MPI
using JuMP
using UnitCommitment
UnitCommitment._setup_logger(level = Base.CoreLogging.Error)
function fixture(path::String)::String
basedir = dirname(@__FILE__)
return "$basedir/../../../../fixtures/$path"
end
# Initialize MPI
MPI.Init()
# Configure progressive hedging method
ph = UnitCommitment.ProgressiveHedging()
# Read problem instance
instance = UnitCommitment.read(
[fixture("case14.json.gz"), fixture("case14.json.gz")],
ph,
)
# Build JuMP model
model = UnitCommitment.build_model(
instance = instance,
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
MOI.Silent() => true,
),
)
# Run the decentralized optimization algorithm
UnitCommitment.optimize!(model, ph)
# Fetch the solution
solution = UnitCommitment.solution(model, ph)
# Close MPI
MPI.Finalize()

@@ -1,16 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using MPI
function solution_methods_ProgressiveHedging_usage_test()
basedir = dirname(@__FILE__)
@testset "ProgressiveHedging" begin
mpiexec() do exe
return run(
`$exe -n 2 $(Base.julia_cmd()) --project=test $basedir/ph.jl`,
)
end
end
end

@@ -1,159 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, DataStructures
function solution_methods_TimeDecomposition_initial_status_test()
@testset "determine_initial_status" begin
hot_start = 100
cold_start = -100
# all on throughout
stat_seq = ones(36)
# hot start
new_stat = UnitCommitment._determine_initial_status(hot_start, stat_seq)
@test new_stat == 136
# cold start
new_stat =
UnitCommitment._determine_initial_status(cold_start, stat_seq)
@test new_stat == 36
# off in the last 12 periods
stat_seq = ones(36)
stat_seq[25:end] .= 0
# hot start
new_stat = UnitCommitment._determine_initial_status(hot_start, stat_seq)
@test new_stat == -12
# cold start
new_stat =
UnitCommitment._determine_initial_status(cold_start, stat_seq)
@test new_stat == -12
# off in one period
stat_seq = ones(36)
stat_seq[10] = 0
# hot start
new_stat = UnitCommitment._determine_initial_status(hot_start, stat_seq)
@test new_stat == 26
# cold start
new_stat =
UnitCommitment._determine_initial_status(cold_start, stat_seq)
@test new_stat == 26
# off in several of the first 24 periods
stat_seq = ones(36)
stat_seq[[10, 11, 20]] .= 0
# hot start
new_stat = UnitCommitment._determine_initial_status(hot_start, stat_seq)
@test new_stat == 16
# cold start
new_stat =
UnitCommitment._determine_initial_status(cold_start, stat_seq)
@test new_stat == 16
# all off throughout
stat_seq = zeros(36)
# hot start
new_stat = UnitCommitment._determine_initial_status(hot_start, stat_seq)
@test new_stat == -36
# cold start
new_stat =
UnitCommitment._determine_initial_status(cold_start, stat_seq)
@test new_stat == -136
# on in the last 12 periods
stat_seq = zeros(36)
stat_seq[25:end] .= 1
# hot start
new_stat = UnitCommitment._determine_initial_status(hot_start, stat_seq)
@test new_stat == 12
# cold start
new_stat =
UnitCommitment._determine_initial_status(cold_start, stat_seq)
@test new_stat == 12
end
@testset "set_initial_status" begin
# read one scenario
instance = UnitCommitment.read(fixture("case14.json.gz"))
psuedo_solution = OrderedDict(
"Thermal production (MW)" => OrderedDict(
"g1" => [0.0, 112.0, 114.0, 116.0],
"g2" => [0.0, 102.0, 0.0, 0.0],
"g3" => [0.0, 0.0, 0.0, 0.0],
"g4" => [0.0, 34.0, 66.0, 99.0],
"g5" => [0.0, 34.0, 66.0, 99.0],
"g6" => [0.0, 100.0, 100.0, 100.0],
),
"Is on" => OrderedDict(
"g1" => [0.0, 1.0, 1.0, 1.0],
"g2" => [0.0, 1.0, 0.0, 0.0],
"g3" => [0.0, 0.0, 0.0, 0.0],
"g4" => [0.0, 1.0, 1.0, 1.0],
"g5" => [0.0, 1.0, 1.0, 1.0],
"g6" => [0.0, 1.0, 1.0, 1.0],
),
)
UnitCommitment._set_initial_status!(instance, psuedo_solution, 3)
thermal_units = instance.scenarios[1].thermal_units
@test thermal_units[1].initial_power == 116.0
@test thermal_units[1].initial_status == 3.0
@test thermal_units[2].initial_power == 0.0
@test thermal_units[2].initial_status == -2.0
@test thermal_units[3].initial_power == 0.0
@test thermal_units[3].initial_status == -9.0
# read multiple scenarios
instance = UnitCommitment.read([
fixture("case14.json.gz"),
fixture("case14-profiled.json.gz"),
])
psuedo_solution = OrderedDict(
"case14" => OrderedDict(
"Thermal production (MW)" => OrderedDict(
"g1" => [0.0, 112.0, 114.0, 116.0],
"g2" => [0.0, 102.0, 0.0, 0.0],
"g3" => [0.0, 0.0, 0.0, 0.0],
"g4" => [0.0, 34.0, 66.0, 99.0],
"g5" => [0.0, 34.0, 66.0, 99.0],
"g6" => [0.0, 100.0, 100.0, 100.0],
),
"Is on" => OrderedDict(
"g1" => [0.0, 1.0, 1.0, 1.0],
"g2" => [0.0, 1.0, 0.0, 0.0],
"g3" => [0.0, 0.0, 0.0, 0.0],
"g4" => [0.0, 1.0, 1.0, 1.0],
"g5" => [0.0, 1.0, 1.0, 1.0],
"g6" => [0.0, 1.0, 1.0, 1.0],
),
),
"case14-profiled" => OrderedDict(
"Thermal production (MW)" => OrderedDict(
"g1" => [0.0, 113.0, 116.0, 115.0],
"g2" => [0.0, 0.0, 0.0, 0.0],
"g3" => [0.0, 0.0, 0.0, 20.0],
"g4" => [0.0, 34.0, 66.0, 98.0],
"g5" => [0.0, 34.0, 66.0, 97.0],
"g6" => [0.0, 100.0, 100.0, 100.0],
),
"Is on" => OrderedDict(
"g1" => [0.0, 1.0, 1.0, 1.0],
"g2" => [0.0, 0.0, 0.0, 0.0],
"g3" => [0.0, 0.0, 0.0, 1.0],
"g4" => [0.0, 1.0, 1.0, 1.0],
"g5" => [0.0, 1.0, 1.0, 1.0],
"g6" => [0.0, 1.0, 1.0, 1.0],
),
),
)
UnitCommitment._set_initial_status!(instance, psuedo_solution, 3)
thermal_units_sc2 = instance.scenarios[2].thermal_units
@test thermal_units_sc2[1].initial_power == 115.0
@test thermal_units_sc2[1].initial_status == 3.0
@test thermal_units_sc2[2].initial_power == 0.0
@test thermal_units_sc2[2].initial_status == -11.0
@test thermal_units_sc2[3].initial_power == 20.0
@test thermal_units_sc2[3].initial_status == 1.0
end
end

@@ -1,88 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, DataStructures, Cbc, HiGHS
import UnitCommitment: TimeDecomposition, ConventionalLMP
function solution_methods_TimeDecomposition_optimize_test()
@testset "optimize_time_decomposition" begin
# read one scenario
instance = UnitCommitment.read(fixture("case14.json.gz"))
solution = UnitCommitment.optimize!(
instance,
TimeDecomposition(time_window = 3, time_increment = 2),
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
)
@test length(solution["Thermal production (MW)"]["g1"]) == 4
@test length(solution["Is on"]["g2"]) == 4
@test length(solution["Spinning reserve (MW)"]["r1"]["g2"]) == 4
# read one scenario with after_build and after_optimize
function after_build(model, instance)
@constraint(
model,
model[:is_on]["g3", 1] + model[:is_on]["g4", 1] <= 1,
)
end
lmps = []
function after_optimize(solution, model, instance)
lmp = UnitCommitment.compute_lmp(
model,
ConventionalLMP(),
optimizer = optimizer_with_attributes(
HiGHS.Optimizer,
"log_to_console" => false,
),
)
return push!(lmps, lmp)
end
instance = UnitCommitment.read(fixture("case14-profiled.json.gz"))
solution = UnitCommitment.optimize!(
instance,
TimeDecomposition(time_window = 3, time_increment = 2),
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
after_build = after_build,
after_optimize = after_optimize,
)
@test length(lmps) == 2
@test lmps[1]["s1", "b1", 1] == 50.0
@test lmps[2]["s1", "b10", 2] ≈ 38.04 atol = 0.1
@test solution["Is on"]["g3"][1] == 1.0
@test solution["Is on"]["g4"][1] == 0.0
# read multiple scenarios
instance = UnitCommitment.read([
fixture("case14.json.gz"),
fixture("case14-profiled.json.gz"),
])
solution = UnitCommitment.optimize!(
instance,
TimeDecomposition(time_window = 3, time_increment = 2),
optimizer = optimizer_with_attributes(
Cbc.Optimizer,
"logLevel" => 0,
),
)
@test length(solution["case14"]["Thermal production (MW)"]["g3"]) == 4
@test length(solution["case14"]["Is on"]["g4"]) == 4
@test length(
solution["case14-profiled"]["Thermal production (MW)"]["g5"],
) == 4
@test length(solution["case14-profiled"]["Is on"]["g6"]) == 4
@test length(
solution["case14-profiled"]["Profiled production (MW)"]["g7"],
) == 4
@test length(
solution["case14-profiled"]["Spinning reserve (MW)"]["r1"]["g3"],
) == 4
end
end

@@ -1,55 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, DataStructures
function solution_methods_TimeDecomposition_update_solution_test()
@testset "update_solution" begin
psuedo_solution = OrderedDict()
time_increment = 4
psuedo_sub_solution = OrderedDict(
"Thermal production (MW)" => OrderedDict(
"g1" => [100.0, 200.0, 300.0, 400.0, 500.0, 600.0],
),
"Is on" => OrderedDict("g1" => [1.0, 0.0, 1.0, 1.0, 0.0, 1.0]),
"Profiled production (MW)" => OrderedDict(
"g1" => [199.0, 299.0, 399.0, 499.0, 599.0, 699.0],
),
"Spinning reserve (MW)" => OrderedDict(
"r1" => OrderedDict(
"g1" => [31.0, 32.0, 33.0, 34.0, 35.0, 36.0],
),
),
)
# first update should directly copy the first 4 entries of sub solution
UnitCommitment._update_solution!(
psuedo_solution,
psuedo_sub_solution,
time_increment,
)
@test psuedo_solution["Thermal production (MW)"]["g1"] ==
[100.0, 200.0, 300.0, 400.0]
@test psuedo_solution["Is on"]["g1"] == [1.0, 0.0, 1.0, 1.0]
@test psuedo_solution["Profiled production (MW)"]["g1"] ==
[199.0, 299.0, 399.0, 499.0]
@test psuedo_solution["Spinning reserve (MW)"]["r1"]["g1"] ==
[31.0, 32.0, 33.0, 34.0]
# second update should append the first 4 entries of sub solution
UnitCommitment._update_solution!(
psuedo_solution,
psuedo_sub_solution,
time_increment,
)
@test psuedo_solution["Thermal production (MW)"]["g1"] ==
[100.0, 200.0, 300.0, 400.0, 100.0, 200.0, 300.0, 400.0]
@test psuedo_solution["Is on"]["g1"] ==
[1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0]
@test psuedo_solution["Profiled production (MW)"]["g1"] ==
[199.0, 299.0, 399.0, 499.0, 199.0, 299.0, 399.0, 499.0]
@test psuedo_solution["Spinning reserve (MW)"]["r1"]["g1"] ==
[31.0, 32.0, 33.0, 34.0, 31.0, 32.0, 33.0, 34.0]
end
end

@@ -2,32 +2,27 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, HiGHS, JuMP
using UnitCommitment, Cbc, JuMP
function transform_initcond_test()
@testset "generate_initial_conditions!" begin
# Load instance
instance = UnitCommitment.read(fixture("case118-initcond.json.gz"))
optimizer =
optimizer_with_attributes(HiGHS.Optimizer, MOI.Silent() => true)
optimizer = optimizer_with_attributes(Cbc.Optimizer, "logLevel" => 0)
sc = instance.scenarios[1]
# All units should have unknown initial conditions
for sc in instance.scenarios
for g in sc.thermal_units
@test g.initial_power === nothing
@test g.initial_status === nothing
end
for g in sc.thermal_units
@test g.initial_power === nothing
@test g.initial_status === nothing
end
# Generate initial conditions
UnitCommitment.generate_initial_conditions!(instance, optimizer)
UnitCommitment.generate_initial_conditions!(sc, optimizer)
# All units should now have known initial conditions
for sc in instance.scenarios
for g in sc.thermal_units
@test g.initial_power !== nothing
@test g.initial_status !== nothing
end
for g in sc.thermal_units
@test g.initial_power !== nothing
@test g.initial_status !== nothing
end
# TODO: Check that initial conditions are feasible

@@ -102,28 +102,5 @@ function transform_randomize_XavQiuAhm2021_test()
test_approx(pu1.cost[1], 98.039)
test_approx(pu2.cost[1], 48.385)
end
@testset "storage unit cost" begin
sc = UnitCommitment.read(
fixture("case14-storage.json.gz"),
).scenarios[1]
# Check original costs
su1 = sc.storage_units[1]
su3 = sc.storage_units[3]
test_approx(su1.charge_cost[4], 2.0)
test_approx(su1.discharge_cost[1], 2.5)
test_approx(su3.charge_cost[2], 2.1)
test_approx(su3.discharge_cost[3], 1.2)
randomize!(
sc,
XavQiuAhm2021.Randomization(randomize_load_profile = false),
rng = MersenneTwister(42),
)
# Check randomized costs
test_approx(su1.charge_cost[4], 1.961)
test_approx(su1.discharge_cost[1], 2.451)
test_approx(su3.charge_cost[2], 2.196)
test_approx(su3.discharge_cost[3], 1.255)
end
end
end

@@ -65,34 +65,4 @@ function transform_slice_test()
variable_names = true,
)
end
@testset "slice storage units" begin
instance = UnitCommitment.read(fixture("case14-storage.json.gz"))
modified = UnitCommitment.slice(instance, 2:4)
sc = modified.scenarios[1]
# Should update all time-dependent fields
for su in sc.storage_units
@test length(su.min_level) == 3
@test length(su.max_level) == 3
@test length(su.simultaneous_charge_and_discharge) == 3
@test length(su.charge_cost) == 3
@test length(su.discharge_cost) == 3
@test length(su.charge_efficiency) == 3
@test length(su.discharge_efficiency) == 3
@test length(su.loss_factor) == 3
@test length(su.min_charge_rate) == 3
@test length(su.max_charge_rate) == 3
@test length(su.min_discharge_rate) == 3
@test length(su.max_discharge_rate) == 3
end
# Should be able to build model without errors
optimizer = optimizer_with_attributes(Cbc.Optimizer, "logLevel" => 0)
model = UnitCommitment.build_model(
instance = modified,
optimizer = optimizer,
variable_names = true,
)
end
end