Mirror of https://github.com/ANL-CEEESA/UnitCommitment.jl.git (synced 2025-12-06 08:18:51 -06:00)
Update benchmark scripts
Makefile (2 changed lines)
@@ -8,7 +8,7 @@ VERSION := 0.2
 build/sysimage.so: src/utils/sysimage.jl Project.toml Manifest.toml
 	mkdir -p build
 	mkdir -p benchmark/results/test
-	cd benchmark; $(JULIA) --trace-compile=../build/precompile.jl run.jl test/case14.1.sol.json
+	cd benchmark; $(JULIA) --trace-compile=../build/precompile.jl benchmark.jl test/case14
 	$(JULIA) src/utils/sysimage.jl

 clean:
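Note: this rule first runs the benchmark driver once under --trace-compile to collect precompile statements, then calls src/utils/sysimage.jl to bake them into build/sysimage.so. A minimal sketch of what that helper presumably does with PackageCompiler.jl; the package list here is an assumption, not taken from the repository:

    # Hypothetical contents of src/utils/sysimage.jl: bake the traced
    # precompile statements into a custom system image.
    using PackageCompiler

    create_sysimage(
        [:UnitCommitment, :JuMP, :Gurobi];  # assumed package list
        sysimage_path = "build/sysimage.so",
        precompile_statements_file = "build/precompile.jl",
    )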
benchmark/Makefile (deleted)
@@ -1,105 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

SHELL := /bin/bash
JULIA := julia --project=. --sysimage ../build/sysimage.so
TIMESTAMP := $(shell date "+%Y-%m-%d %H:%M")
SRC_FILES := $(wildcard ../src/*.jl)

INSTANCES_PGLIB := \
	pglib-uc/ca/2014-09-01_reserves_0 \
	pglib-uc/ca/2014-09-01_reserves_1 \
	pglib-uc/ca/2015-03-01_reserves_0 \
	pglib-uc/ca/2015-06-01_reserves_0 \
	pglib-uc/ca/Scenario400_reserves_1 \
	pglib-uc/ferc/2015-01-01_lw \
	pglib-uc/ferc/2015-05-01_lw \
	pglib-uc/ferc/2015-07-01_hw \
	pglib-uc/ferc/2015-10-01_lw \
	pglib-uc/ferc/2015-12-01_lw \
	pglib-uc/rts_gmlc/2020-04-03 \
	pglib-uc/rts_gmlc/2020-09-20 \
	pglib-uc/rts_gmlc/2020-10-27 \
	pglib-uc/rts_gmlc/2020-11-25 \
	pglib-uc/rts_gmlc/2020-12-23

INSTANCES_MATPOWER := \
	matpower/case118/2017-02-01 \
	matpower/case118/2017-08-01 \
	matpower/case300/2017-02-01 \
	matpower/case300/2017-08-01 \
	matpower/case1354pegase/2017-02-01 \
	matpower/case1888rte/2017-02-01 \
	matpower/case1951rte/2017-08-01 \
	matpower/case2848rte/2017-02-01 \
	matpower/case2868rte/2017-08-01 \
	matpower/case3375wp/2017-08-01 \
	matpower/case6468rte/2017-08-01 \
	matpower/case6515rte/2017-08-01

INSTANCES_ORLIB := \
	or-lib/20_0_1_w \
	or-lib/20_0_5_w \
	or-lib/50_0_2_w \
	or-lib/75_0_2_w \
	or-lib/100_0_1_w \
	or-lib/100_0_4_w \
	or-lib/100_0_5_w \
	or-lib/200_0_3_w \
	or-lib/200_0_7_w \
	or-lib/200_0_9_w

INSTANCES_TEJADA19 := \
	tejada19/UC_24h_290g \
	tejada19/UC_24h_623g \
	tejada19/UC_24h_959g \
	tejada19/UC_24h_1577g \
	tejada19/UC_24h_1888g \
	tejada19/UC_168h_72g \
	tejada19/UC_168h_86g \
	tejada19/UC_168h_130g \
	tejada19/UC_168h_131g \
	tejada19/UC_168h_199g

SAMPLES := 1 2 3 4 5
SOLUTIONS_MATPOWER := $(foreach s,$(SAMPLES),$(addprefix results/,$(addsuffix .$(s).sol.json,$(INSTANCES_MATPOWER))))
SOLUTIONS_PGLIB := $(foreach s,$(SAMPLES),$(addprefix results/,$(addsuffix .$(s).sol.json,$(INSTANCES_PGLIB))))
SOLUTIONS_ORLIB := $(foreach s,$(SAMPLES),$(addprefix results/,$(addsuffix .$(s).sol.json,$(INSTANCES_ORLIB))))
SOLUTIONS_TEJADA19 := $(foreach s,$(SAMPLES),$(addprefix results/,$(addsuffix .$(s).sol.json,$(INSTANCES_TEJADA19))))

.PHONY: matpower pglib orlib tejada19 clean clean-mps clean-sol save tables

all: matpower pglib orlib tejada19

matpower: $(SOLUTIONS_MATPOWER)

pglib: $(SOLUTIONS_PGLIB)

orlib: $(SOLUTIONS_ORLIB)

tejada19: $(SOLUTIONS_TEJADA19)

clean:
	@rm -rf tables/benchmark* tables/compare* results

clean-mps:
	@rm -fv results/*/*.mps.gz results/*/*/*.mps.gz

clean-sol:
	@rm -rf results/*/*.sol.* results/*/*/*.sol.*

save:
	mkdir -p "runs/$(TIMESTAMP)"
	rsync -avP results tables "runs/$(TIMESTAMP)/"

results/%.sol.json: run.jl
	@echo "run $*"
	@mkdir -p $(dir results/$*)
	@$(JULIA) run.jl $* 2>&1 | cat > results/$*.log
	@echo "run $* [done]"

tables:
	@mkdir -p tables
	@python scripts/table.py
	#@python scripts/compare.py tables/reference.csv tables/benchmark.csv
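Note: the deleted results/%.sol.json pattern rule ran one instance per target, capturing all output in a log file. A rough Julia equivalent of a single invocation; the stem value is illustrative:

    # Sketch of one Makefile invocation; the real pipeline shells out
    # to `julia run.jl` once per (instance, sample) stem.
    stem = "matpower/case118/2017-02-01.1"    # illustrative "$*"
    mkpath(dirname("results/" * stem))        # @mkdir -p $(dir results/$*)
    logfile = "results/" * stem * ".log"
    run(pipeline(`julia --project=. run.jl $stem`; stdout = logfile, stderr = logfile))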
benchmark/benchmark.jl (new file, 162 lines)
@@ -0,0 +1,162 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

using Distributed
using Pkg
Pkg.activate(".")

@everywhere using Pkg
@everywhere Pkg.activate(".")

@everywhere using UnitCommitment
@everywhere using JuMP
@everywhere using Gurobi
@everywhere using JSON
@everywhere using Logging
@everywhere using Printf
@everywhere using LinearAlgebra
@everywhere using Random

@everywhere UnitCommitment._setup_logger()

function main()
    cases = [
        "pglib-uc/ca/2014-09-01_reserves_0",
        "pglib-uc/ca/2014-09-01_reserves_1",
        "pglib-uc/ca/2015-03-01_reserves_0",
        "pglib-uc/ca/2015-06-01_reserves_0",
        "pglib-uc/ca/Scenario400_reserves_1",
        "pglib-uc/ferc/2015-01-01_lw",
        "pglib-uc/ferc/2015-05-01_lw",
        "pglib-uc/ferc/2015-07-01_hw",
        "pglib-uc/ferc/2015-10-01_lw",
        "pglib-uc/ferc/2015-12-01_lw",
        "pglib-uc/rts_gmlc/2020-04-03",
        "pglib-uc/rts_gmlc/2020-09-20",
        "pglib-uc/rts_gmlc/2020-10-27",
        "pglib-uc/rts_gmlc/2020-11-25",
        "pglib-uc/rts_gmlc/2020-12-23",
        "or-lib/20_0_1_w",
        "or-lib/20_0_5_w",
        "or-lib/50_0_2_w",
        "or-lib/75_0_2_w",
        "or-lib/100_0_1_w",
        "or-lib/100_0_4_w",
        "or-lib/100_0_5_w",
        "or-lib/200_0_3_w",
        "or-lib/200_0_7_w",
        "or-lib/200_0_9_w",
        "tejada19/UC_24h_290g",
        "tejada19/UC_24h_623g",
        "tejada19/UC_24h_959g",
        "tejada19/UC_24h_1577g",
        "tejada19/UC_24h_1888g",
        "tejada19/UC_168h_72g",
        "tejada19/UC_168h_86g",
        "tejada19/UC_168h_130g",
        "tejada19/UC_168h_131g",
        "tejada19/UC_168h_199g",
    ]
    formulations = Dict(
        # "ArrCon00" => UnitCommitment.Formulation(
        #     ramping = UnitCommitment._ArrCon00(),
        # ),
        "DamKucRajAta16" => UnitCommitment.Formulation(
            ramping = UnitCommitment.DamKucRajAta16(),
        ),
        "MorLatRam13" => UnitCommitment.Formulation(
            ramping = UnitCommitment.MorLatRam13(),
        ),
    )
    trials = [i for i in 1:5]
    combinations = [
        (c, f.first, f.second, t) for c in cases for f in formulations for
        t in trials
    ]
    shuffle!(combinations)
    @sync @distributed for c in combinations
        _run_combination(c...)
    end
end

@everywhere function _run_combination(
    case,
    formulation_name,
    formulation,
    trial,
)
    name = "$formulation_name/$case"
    dirname = "results/$name"
    mkpath(dirname)
    if isfile("$dirname/$trial.json")
        @info @sprintf(
            "%-8s %-20s %-40s",
            "skip",
            formulation_name,
            "$case/$trial",
        )
        return
    end
    @info @sprintf(
        "%-8s %-20s %-40s",
        "start",
        formulation_name,
        "$case/$trial",
    )
    time = @elapsed open("$dirname/$trial.log", "w") do file
        redirect_stdout(file) do
            redirect_stderr(file) do
                return _run_sample(case, formulation, "$dirname/$trial")
            end
        end
    end
    @info @sprintf(
        "%-8s %-20s %-40s %12.3f",
        "finish",
        formulation_name,
        "$case/$trial",
        time
    )
end

@everywhere function _run_sample(case, formulation, prefix)
    total_time = @elapsed begin
        @info "Reading: $case"
        time_read = @elapsed begin
            instance = UnitCommitment.read_benchmark(case)
        end
        @info @sprintf("Read problem in %.2f seconds", time_read)
        BLAS.set_num_threads(4)
        model = UnitCommitment._build_model(
            instance,
            formulation,
            optimizer = optimizer_with_attributes(
                Gurobi.Optimizer,
                "Threads" => 4,
                "Seed" => rand(1:1000),
            ),
            variable_names = true,
        )
        @info "Optimizing..."
        BLAS.set_num_threads(1)
        UnitCommitment.optimize!(
            model,
            UnitCommitment.XavQiuWanThi19(time_limit = 3600.0, gap_limit = 1e-4),
        )
    end
    @info @sprintf("Total time was %.2f seconds", total_time)
    @info "Writing solution: $prefix.json"
    solution = UnitCommitment.solution(model)
    UnitCommitment.write("$prefix.json", solution)
    @info "Verifying solution..."
    return UnitCommitment.validate(instance, solution)
    # @info "Exporting model..."
    # return JuMP.write_to_file(model, model_filename)
end

if length(ARGS) > 0
    _run_sample(ARGS[1], UnitCommitment.Formulation(), "tmp")
else
    main()
end
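Note: benchmark.jl uses @everywhere and @sync @distributed but never calls addprocs, so the worker pool must exist before the script body runs. A minimal sketch of how it is presumably launched; the worker count is illustrative, not taken from the repository:

    # Option 1: spawn workers at launch:
    #     julia -p 4 --project=. benchmark.jl
    # Option 2 (hypothetical driver): add workers programmatically
    # before the @everywhere blocks execute.
    using Distributed
    addprocs(4; exeflags = "--project=.")   # illustrative worker count
    @everywhere @info "worker ready" myid()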
benchmark/run.jl (deleted)
@@ -1,54 +0,0 @@
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

using UnitCommitment
using JuMP
using Gurobi
using JSON
using Logging
using Printf
using LinearAlgebra

UnitCommitment._setup_logger()

function main()
    basename, suffix = split(ARGS[1], ".")
    solution_filename = "results/$basename.$suffix.sol.json"
    model_filename = "results/$basename.$suffix.mps.gz"
    BLAS.set_num_threads(4)
    total_time = @elapsed begin
        @info "Reading: $basename"
        time_read = @elapsed begin
            instance = UnitCommitment.read_benchmark(basename)
        end
        @info @sprintf("Read problem in %.2f seconds", time_read)
        model = UnitCommitment.build_model(
            instance = instance,
            optimizer = optimizer_with_attributes(
                Gurobi.Optimizer,
                "Threads" => 4,
                "Seed" => rand(1:1000),
            ),
            variable_names = true,
        )
        @info "Optimizing..."
        BLAS.set_num_threads(1)
        UnitCommitment.optimize!(
            model,
            UnitCommitment._XaQiWaTh19(time_limit = 3600.0),
        )
    end
    @info @sprintf("Total time was %.2f seconds", total_time)
    @info "Writing: $solution_filename"
    solution = UnitCommitment.solution(model)
    open(solution_filename, "w") do file
        return JSON.print(file, solution, 2)
    end
    @info "Verifying solution..."
    UnitCommitment.validate(instance, solution)
    @info "Exporting model..."
    return JuMP.write_to_file(model, model_filename)
end

main()
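Note: the deleted run.jl drove a single instance through the public API, whereas the new benchmark.jl reaches for the internal _build_model so it can inject a ramping formulation. For reference, a minimal single-instance run through the public entry points used above; the instance name is illustrative:

    # Minimal public-API run mirroring the deleted script
    # (the instance name is an example; any benchmark instance works).
    using UnitCommitment, JuMP, Gurobi

    instance = UnitCommitment.read_benchmark("matpower/case118/2017-02-01")
    model = UnitCommitment.build_model(
        instance = instance,
        optimizer = Gurobi.Optimizer,
    )
    JuMP.optimize!(model)
    solution = UnitCommitment.solution(model)
    UnitCommitment.validate(instance, solution)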
scripts/compare.py
@@ -5,71 +5,84 @@
 import pandas as pd
 import numpy as np
 import seaborn as sns
 import matplotlib
 import matplotlib.pyplot as plt
 import sys

-# easy_cutoff = 120
-
-b1 = pd.read_csv(sys.argv[1], index_col=0)
-b2 = pd.read_csv(sys.argv[2], index_col=0)
-
-c1 = b1.groupby(["Group", "Instance", "Sample"])[
-    ["Optimization time (s)", "Primal bound"]
-].mean()
-c2 = b2.groupby(["Group", "Instance", "Sample"])[
-    ["Optimization time (s)", "Primal bound"]
-].mean()
-c1.columns = ["A Time (s)", "A Value"]
-c2.columns = ["B Time (s)", "B Value"]
-
-merged = pd.concat([c1, c2], axis=1)
-merged["Speedup"] = merged["A Time (s)"] / merged["B Time (s)"]
-merged["Time diff (s)"] = merged["B Time (s)"] - merged["A Time (s)"]
-merged["Value diff (%)"] = np.round(
-    (merged["B Value"] - merged["A Value"]) / merged["A Value"] * 100.0, 5
-)
+matplotlib.use("Agg")
+sns.set("talk")
+sns.set_palette(
+    [
+        "#9b59b6",
+        "#3498db",
+        "#95a5a6",
+        "#e74c3c",
+        "#34495e",
+        "#2ecc71",
+    ]
+)
-merged.loc[merged.loc[:, "B Time (s)"] <= 0, "Speedup"] = float("nan")
-merged.loc[merged.loc[:, "B Time (s)"] <= 0, "Time diff (s)"] = float("nan")
-# merged = merged[(merged["A Time (s)"] >= easy_cutoff) | (merged["B Time (s)"] >= easy_cutoff)]
-merged.reset_index(inplace=True)
-merged["Name"] = merged["Group"] + "/" + merged["Instance"]
-# merged = merged.sort_values(by="Speedup", ascending=False)
-
+filename = sys.argv[1]
+m1 = sys.argv[2]
+m2 = sys.argv[3]
+
-k = len(merged.groupby("Name"))
-plt.figure(figsize=(12, 0.50 * k))
-plt.rcParams["xtick.bottom"] = plt.rcParams["xtick.labelbottom"] = True
-plt.rcParams["xtick.top"] = plt.rcParams["xtick.labeltop"] = True
-sns.set_style("whitegrid")
-sns.set_palette("Set1")
+# Prepare data
+data = pd.read_csv(filename, index_col=0)
+b1 = (
+    data[data["Group"] == m1]
+    .groupby(["Instance", "Sample"])
+    .mean()[["Optimization time (s)"]]
+)
+b2 = (
+    data[data["Group"] == m2]
+    .groupby(["Instance", "Sample"])
+    .mean()[["Optimization time (s)"]]
+)
+b1.columns = [f"{m1} time (s)"]
+b2.columns = [f"{m2} time (s)"]
+merged = pd.merge(b1, b2, left_index=True, right_index=True).reset_index().dropna()
+merged["Speedup"] = merged[f"{m1} time (s)"] / merged[f"{m2} time (s)"]
+merged["Group"] = merged["Instance"].str.replace(r"\/.*", "", regex=True)
+merged = merged.sort_values(by=["Instance", "Sample"], ascending=True)
+merged = merged[(merged[f"{m1} time (s)"] > 0) & (merged[f"{m2} time (s)"] > 0)]
+
+# Plot results
+k1 = len(merged.groupby("Instance").mean())
+k2 = len(merged.groupby("Group").mean())
+k = k1 + k2
+fig = plt.figure(
+    constrained_layout=True,
+    figsize=(15, max(5, 0.75 * k)),
+)
+plt.suptitle(f"{m1} vs {m2}")
+gs1 = fig.add_gridspec(nrows=k, ncols=1)
+ax1 = fig.add_subplot(gs1[0:k1, 0:1])
+ax2 = fig.add_subplot(gs1[k1:, 0:1], sharex=ax1)
 sns.barplot(
     data=merged,
     x="Speedup",
-    y="Name",
-    color="tab:red",
+    y="Instance",
+    color="tab:purple",
     capsize=0.15,
     errcolor="k",
     errwidth=1.25,
+    ax=ax1,
 )
-plt.axvline(1.0, linestyle="--", color="k")
-plt.tight_layout()
+sns.barplot(
+    data=merged,
+    x="Speedup",
+    y="Group",
+    color="tab:purple",
+    capsize=0.15,
+    errcolor="k",
+    errwidth=1.25,
+    ax=ax2,
+)
+ax1.axvline(1.0, linestyle="--", color="k")
+ax2.axvline(1.0, linestyle="--", color="k")

 print("Writing tables/compare.png")
 plt.savefig("tables/compare.png", dpi=150)

 print("Writing tables/compare.csv")
-merged.loc[
-    :,
-    [
-        "Group",
-        "Instance",
-        "Sample",
-        "A Time (s)",
-        "B Time (s)",
-        "Speedup",
-        "Time diff (s)",
-        "A Value",
-        "B Value",
-        "Value diff (%)",
-    ],
-].to_csv("tables/compare.csv", index_label="Index")
+merged.to_csv("tables/compare.csv", index_label="Index")
scripts/table.py
@@ -9,8 +9,7 @@ from tabulate import tabulate

 def process_all_log_files():
-    pathlist = list(Path(".").glob("results/*/*/*.log"))
-    pathlist += list(Path(".").glob("results/*/*.log"))
+    pathlist = list(Path(".").glob("results/**/*.log"))
     rows = []
     for path in pathlist:
         if ".ipy" in str(path):
@@ -26,9 +25,9 @@ def process_all_log_files():

 def process(filename):
     parts = filename.replace(".log", "").split("/")
-    group_name = "/".join(parts[1:-1])
-    instance_name = parts[-1]
-    instance_name, sample_name = instance_name.split(".")
+    group_name = parts[1]
+    instance_name = "/".join(parts[2:-1])
+    sample_name = parts[-1]
     nodes = 0.0
     optimize_time = 0.0
     simplex_iterations = 0.0
@@ -174,28 +173,37 @@ def process(filename):

 def generate_chart():
     import pandas as pd
     import matplotlib
     import matplotlib.pyplot as plt
     import seaborn as sns

+    matplotlib.use("Agg")
+    sns.set("talk")
+    sns.set_palette(
+        [
+            "#9b59b6",
+            "#3498db",
+            "#95a5a6",
+            "#e74c3c",
+            "#34495e",
+            "#2ecc71",
+        ]
+    )
+
     tables = []
     files = ["tables/benchmark.csv"]
     for f in files:
         table = pd.read_csv(f, index_col=0)
+        table.loc[:, "Instance"] = (
+            table.loc[:, "Group"] + "/" + table.loc[:, "Instance"]
+        )
         table.loc[:, "Filename"] = f
         tables += [table]
     benchmark = pd.concat(tables, sort=True)
     benchmark = benchmark.sort_values(by="Instance")
     k = len(benchmark.groupby("Instance"))
-    plt.figure(figsize=(12, 0.50 * k))
-    sns.set_style("whitegrid")
-    sns.set_palette("Set1")
+    plt.figure(figsize=(12, k))
     sns.barplot(
         y="Instance",
         x="Total time (s)",
-        color="tab:red",
+        hue="Group",
         capsize=0.15,
         errcolor="k",
         errwidth=1.25,