Start implementation of circular model

This commit is contained in:
2023-11-28 10:26:07 -06:00
parent 84bd25b04d
commit 74759bd602
106 changed files with 235 additions and 39025 deletions

View File

@@ -1,63 +0,0 @@
# RELOG: Reverse Logistics Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using DataStructures
using JSON
using JSONSchema
using Printf
using Statistics
"""
    _compress(instance::Instance)

Collapse a multi-period instance into an equivalent single-period one.
Every time-indexed attribute is reduced to a one-element vector, using
whichever aggregation fits that attribute: per-period rates and costs
are averaged, cumulative limits are summed, and capacities/amounts are
scaled up to cover the whole horizon.
"""
function _compress(instance::Instance)::Instance
    horizon = instance.time
    result = deepcopy(instance)
    result.time = 1
    result.building_period = [1]
    # Products: rates are averaged; the disposal limit accumulates over time.
    for prod in result.products
        prod.acquisition_cost = [mean(prod.acquisition_cost)]
        prod.disposal_cost = [mean(prod.disposal_cost)]
        prod.disposal_limit = [sum(prod.disposal_limit)]
        prod.transportation_cost = [mean(prod.transportation_cost)]
        prod.transportation_energy = [mean(prod.transportation_energy)]
        for (emission, series) in prod.transportation_emissions
            prod.transportation_emissions[emission] = [mean(series)]
        end
    end
    # Collection centers: peak per-period amount, scaled to the full horizon.
    for cc in result.collection_centers
        cc.amount = [maximum(cc.amount) * horizon]
    end
    # Plants: average rates, scale capacities, accumulate fixed costs/limits.
    for pl in result.plants
        pl.energy = [mean(pl.energy)]
        for (emission, series) in pl.emissions
            pl.emissions[emission] = [mean(series)]
        end
        for sz in pl.sizes
            sz.capacity *= horizon
            sz.variable_operating_cost = [mean(sz.variable_operating_cost)]
            # Opening happens once, so only the first period's cost matters.
            sz.opening_cost = [sz.opening_cost[1]]
            sz.fixed_operating_cost = [sum(sz.fixed_operating_cost)]
        end
        for (prod, series) in pl.disposal_limit
            pl.disposal_limit[prod] = [sum(series)]
        end
        for (prod, series) in pl.disposal_cost
            pl.disposal_cost[prod] = [mean(series)]
        end
    end
    return result
end

View File

@@ -1,212 +0,0 @@
# RELOG: Reverse Logistics Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using CRC
using CSV
using DataFrames
using Shapefile
using Statistics
using ZipFile
using ProgressBars
using OrderedCollections
import Downloads: download
import Base: parse
# CRC-32 checksum function used to validate downloaded files.
# Declared `const` so the global is concretely typed (non-const globals
# are `Any`-typed in Julia and slow down every use).
const crc32 = crc(CRC_32)
# A geographic coordinate, in decimal degrees.
struct GeoPoint
    lat::Float64  # latitude (deg); positional constructor takes lat first
    lon::Float64  # longitude (deg)
end
# A named geographic region (county, state, ...) with its representative
# point and population count. Constructed by keyword only.
struct GeoRegion
    centroid::GeoPoint  # bounding-box center computed by `centroid` below
    population::Int     # 0 when population data is missing for the region
    GeoRegion(; centroid, population) = new(centroid, population)
end
# In-memory cache of loaded geographic databases, keyed by database name
# (e.g. "us-state"). Declared `const` per Julia best practice for
# module-level globals; the Dict itself remains mutable.
const DB_CACHE = Dict{String,Dict{String,GeoRegion}}()
"""
    centroid(geom::Shapefile.Polygon)

Return the center of `geom`'s bounding box as a `GeoPoint`, rounded to
five decimal places. Note this is the midpoint of the axis-aligned
bounding box, not the true area centroid of the polygon.
"""
function centroid(geom::Shapefile.Polygon)::GeoPoint
    hi_x, lo_x = -Inf, Inf
    hi_y, lo_y = -Inf, Inf
    for pt in geom.points
        hi_x = max(hi_x, pt.x)
        lo_x = min(lo_x, pt.x)
        hi_y = max(hi_y, pt.y)
        lo_y = min(lo_y, pt.y)
    end
    mid_x = (hi_x + lo_x) / 2.0
    mid_y = (hi_y + lo_y) / 2.0
    # GeoPoint takes latitude (y) first, longitude (x) second.
    return GeoPoint(round(mid_y, digits = 5), round(mid_x, digits = 5))
end
"""
    _download_file(url, output, expected_crc32)

Download `url` to the path `output`, verifying the payload's CRC32
checksum first. Does nothing when `output` already exists.
"""
function _download_file(url, output, expected_crc32)::Nothing
    isfile(output) && return nothing
    mkpath(dirname(output))
    @info "Downloading: $url"
    # Download to a temporary location; only copy into place after the
    # checksum verifies, so a corrupt download never becomes `output`.
    tmp = download(url)
    checksum = open(crc32, tmp)
    checksum == expected_crc32 || error("CRC32 mismatch")
    cp(tmp, output)
    return nothing
end
"""
    _download_zip(url, outputdir, expected_output_file, expected_crc32)

Download the zip archive at `url`, verify its CRC32 checksum, and
extract all entries into `outputdir`. Does nothing when
`expected_output_file` already exists.
"""
function _download_zip(url, outputdir, expected_output_file, expected_crc32)::Nothing
    isfile(expected_output_file) && return nothing
    mkpath(outputdir)
    @info "Downloading: $url"
    archive = download(url)
    checksum = open(crc32, archive)
    checksum == expected_crc32 || error("CRC32 mismatch")
    open(archive) do io
        reader = ZipFile.Reader(io)
        # NOTE(review): the reader is not closed explicitly; the do-block
        # closes the underlying file handle, which appears sufficient here.
        for entry in reader.files
            open(joinpath(outputdir, entry.name), "w") do out
                write(out, read(entry))
            end
        end
    end
    return nothing
end
"""
    _geodb_load_gov_census(; db_name, extract_cols, ...)

Load a US Census geographic database. On first use, downloads the
shapefile and population CSV, computes region centroids, joins the
population column, and writes `data/<db_name>/locations.csv`. Results
are cached on disk (the CSV) and in memory (`DB_CACHE`).
"""
function _geodb_load_gov_census(;
    db_name,
    extract_cols,
    shp_crc32,
    shp_filename,
    shp_url,
    population_url,
    population_crc32,
    population_col,
    population_preprocess,
    population_join,
)::Dict{String,GeoRegion}
    basedir = joinpath(dirname(@__FILE__), "..", "..", "data", db_name)
    csv_filename = "$basedir/locations.csv"
    if !isfile(csv_filename)
        # Download required files
        _download_zip(shp_url, basedir, joinpath(basedir, shp_filename), shp_crc32)
        _download_file(population_url, "$basedir/population.csv", population_crc32)
        # Read shapefile
        @info "Processing: $shp_filename"
        table = Shapefile.Table(joinpath(basedir, shp_filename))
        geoms = Shapefile.shapes(table)
        # Build empty dataframe, one column per attribute extracted from
        # the shapefile's first row, plus the computed centroid columns.
        df = DataFrame()
        cols = extract_cols(table, 1)
        for k in keys(cols)
            df[!, k] = []
        end
        df[!, "latitude"] = Float64[]
        df[!, "longitude"] = Float64[]
        # Add regions to dataframe (tqdm shows a progress bar)
        for (i, geom) in tqdm(enumerate(geoms))
            c = centroid(geom)
            cols = extract_cols(table, i)
            push!(df, [values(cols)..., c.lat, c.lon])
        end
        sort!(df)
        # Join with population data
        population = DataFrame(CSV.File("$basedir/population.csv"))
        population_preprocess(population)
        population = population[:, [population_join, population_col]]
        rename!(population, population_col => "population")
        df = leftjoin(df, population, on = population_join)
        # Write output
        CSV.write(csv_filename, df)
    end
    # BUGFIX: this guard read "if db_name keys(DB_CACHE)" — the membership
    # operator (a Unicode `∉`) was lost, making the line a syntax error.
    # Use haskey so the CSV is parsed only once per database.
    if !haskey(DB_CACHE, db_name)
        csv = CSV.File(csv_filename)
        DB_CACHE[db_name] = Dict(
            string(row.id) => GeoRegion(
                centroid = GeoPoint(row.latitude, row.longitude),
                # Regions absent from the population table get population 0.
                population = (row.population === missing ? 0 : row.population),
            ) for row in csv
        )
    end
    return DB_CACHE[db_name]
end
# 2018 US counties
# -----------------------------------------------------------------------------
# Extract the identifying columns for row `i` of the 2018 county
# shapefile. The "id" is the 5-digit FIPS code: state FP + county FP.
function _extract_cols_2018_us_county(
    table::Shapefile.Table,
    i::Int,
)::OrderedDict{String,Any}
    statefp = table.STATEFP[i]
    countyfp = table.COUNTYFP[i]
    result = OrderedDict{String,Any}()
    result["id"] = statefp * countyfp
    result["statefp"] = statefp
    result["countyfp"] = countyfp
    result["name"] = table.NAME[i]
    return result
end
# Build the 5-digit FIPS "id" column (2-digit state + 3-digit county)
# so the population table can be joined against the shapefile data.
function _population_preprocess_2018_us_county(df)
    df[!, "id"] = map(row -> @sprintf("%02d%03d", row.STATE, row.COUNTY), eachrow(df))
end
# 2018 US counties, from the Census Bureau cartographic boundary files
# plus the 2019 county population estimates.
function _geodb_load_2018_us_county()::Dict{String,GeoRegion}
    return _geodb_load_gov_census(
        db_name = "2018-us-county",
        shp_url = "https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_county_500k.zip",
        shp_filename = "cb_2018_us_county_500k.shp",
        shp_crc32 = 0x83eaec6d,
        population_url = "https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.csv",
        population_crc32 = 0xf85b0405,
        population_col = "POPESTIMATE2019",
        population_join = "id",
        population_preprocess = _population_preprocess_2018_us_county,
        extract_cols = _extract_cols_2018_us_county,
    )
end
# US States
# -----------------------------------------------------------------------------
# Extract the identifying columns for row `i` of the US state shapefile.
# The "id" is the two-letter USPS state abbreviation.
function _extract_cols_us_state(table::Shapefile.Table, i::Int)::OrderedDict{String,Any}
    result = OrderedDict{String,Any}()
    result["id"] = table.STUSPS[i]
    result["statefp"] = parse(Int, table.STATEFP[i])
    result["name"] = table.NAME[i]
    return result
end
# Align the population table's state-code column name with the
# shapefile's, so the two can be joined on "statefp".
function _population_preprocess_us_state(df)
rename!(df, "STATE" => "statefp")
end
# US states, from the Census Bureau cartographic boundary files plus the
# 2019 state population estimates.
# NOTE(review): the population URL uses plain http, unlike the county
# database above — presumably an oversight; confirm before changing.
function _geodb_load_us_state()::Dict{String,GeoRegion}
    return _geodb_load_gov_census(
        db_name = "us-state",
        shp_url = "https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_state_500k.zip",
        shp_filename = "cb_2018_us_state_500k.shp",
        shp_crc32 = 0x9469e5ca,
        population_url = "http://www2.census.gov/programs-surveys/popest/datasets/2010-2019/national/totals/nst-est2019-alldata.csv",
        population_crc32 = 0x191cc64c,
        population_col = "POPESTIMATE2019",
        population_join = "statefp",
        population_preprocess = _population_preprocess_us_state,
        extract_cols = _extract_cols_us_state,
    )
end
"""
    geodb_load(db_name)

Return the geographic database named `db_name` ("2018-us-county" or
"us-state"), loading and caching it on first access.
"""
function geodb_load(db_name::AbstractString)::Dict{String,GeoRegion}
    if db_name == "2018-us-county"
        return _geodb_load_2018_us_county()
    elseif db_name == "us-state"
        return _geodb_load_us_state()
    end
    error("Unknown database: $db_name")
end
# Look up a region by qualified name of the form "<db>:<id>",
# e.g. "us-state:IL".
function geodb_query(name)::GeoRegion
    parts = split(name, ":")
    database = geodb_load(parts[1])
    return database[parts[2]]
end

View File

@@ -1,233 +1,76 @@
# RELOG: Reverse Logistics Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using DataStructures
using JSON
using JSONSchema
using Printf
using Statistics
using OrderedCollections
"""
    parsefile(path::String)

Read the RELOG input file at `path` and parse it into an `Instance`.
"""
function parsefile(path::String)::Instance
    # BUGFIX: the body contained two return statements (a merged diff),
    # making the second unreachable. Keep the OrderedDict variant so JSON
    # object iteration order matches the input file, which the rest of
    # the parser relies on.
    return RELOG.parse(JSON.parsefile(path, dicttype = () -> OrderedDict()))
end
# NOTE(review): this span is a mechanically merged diff of two versions of
# `parse` — old (time `T`, CollectionCenter/Plant model) and new
# (time_horizon, Center model) statements are interleaved, and as written
# the function is not coherent Julia (variables are rebound, blocks are
# truncated, and there are two merged `return Instance(...)` calls at the
# end). It needs manual reconciliation against the original commit, not a
# rewrite here; comments below mark the visible seams.
function parse(json)::Instance
basedir = dirname(@__FILE__)
json_schema = JSON.parsefile("$basedir/../schemas/input.json")
validate(json, Schema(json_schema))
# Read parameters
time_horizon = json["parameters"]["time horizon (years)"]
building_period = json["parameters"]["building period (years)"]
distance_metric = json["parameters"]["distance metric"]
# NOTE(review): duplicate read of the time horizon — `T` is the old
# version's name, `time_horizon` the new one; the schema is re-validated
# below after pinning the TimeSeries length to T.
T = json["parameters"]["time horizon (years)"]
json_schema["definitions"]["TimeSeries"]["minItems"] = T
json_schema["definitions"]["TimeSeries"]["maxItems"] = T
validate(json, Schema(json_schema))
building_period = [1]
if "building period (years)" in keys(json["parameters"])
building_period = json["parameters"]["building period (years)"]
end
distance_metric = EuclideanDistance()
if "distance metric" in keys(json["parameters"])
metric_name = json["parameters"]["distance metric"]
if metric_name == "driving"
distance_metric = KnnDrivingDistance()
elseif metric_name == "Euclidean"
# nop
else
error("Unknown distance metric: $metric_name")
end
end
plants = Plant[]
# Read products
products = Product[]
collection_centers = CollectionCenter[]
prod_name_to_product = Dict{String,Product}()
products_by_name = OrderedDict{String,Product}()
# New-version products loop: builds Product values keyed by name.
for (pname, pdict) in json["products"]
tr_cost = pdict["transportation cost (\$/km/tonne)"]
tr_energy = pdict["transportation energy (J/km/tonne)"]
tr_emissions = pdict["transportation emissions (tonne/km/tonne)"]
prod = Product(; name = pname, tr_cost, tr_energy, tr_emissions)
push!(products, prod)
products_by_name[pname] = prod
end
# Create products (old-version loop)
for (product_name, product_dict) in json["products"]
cost = product_dict["transportation cost (\$/km/tonne)"]
energy = zeros(T)
emissions = Dict()
disposal_limit = zeros(T)
disposal_cost = zeros(T)
acquisition_cost = zeros(T)
if "transportation energy (J/km/tonne)" in keys(product_dict)
energy = product_dict["transportation energy (J/km/tonne)"]
# NOTE(review): seam — the old products loop is truncated here and the
# new version's centers loop begins mid-`if`; the bodies below mix both.
# Read centers
centers = Center[]
centers_by_name = OrderedDict{String,Center}()
for (cname, cdict) in json["centers"]
latitude = cdict["latitude (deg)"]
longitude = cdict["longitude (deg)"]
input = nothing
revenue = [0.0 for t = 1:time_horizon]
if cdict["input"] !== nothing
input = products_by_name[cdict["input"]]
revenue = cdict["revenue (\$/tonne)"]
end
if "transportation emissions (tonne/km/tonne)" in keys(product_dict)
emissions = product_dict["transportation emissions (tonne/km/tonne)"]
end
if "disposal limit (tonne)" in keys(product_dict)
disposal_limit = product_dict["disposal limit (tonne)"]
end
if "disposal cost (\$/tonne)" in keys(product_dict)
disposal_cost = product_dict["disposal cost (\$/tonne)"]
end
if "acquisition cost (\$/tonne)" in keys(product_dict)
acquisition_cost = product_dict["acquisition cost (\$/tonne)"]
end
prod_centers = []
product = Product(
acquisition_cost = acquisition_cost,
collection_centers = prod_centers,
disposal_cost = disposal_cost,
disposal_limit = disposal_limit,
name = product_name,
transportation_cost = cost,
transportation_emissions = emissions,
transportation_energy = energy,
# NOTE(review): seam — the two lines below belong to the new centers
# loop, spliced into the old Product constructor call.
outputs = [products_by_name[p] for p in cdict["outputs"]]
operating_cost = cdict["operating cost (\$)"]
prod_dict(key, null_val) = OrderedDict(
p => [v === nothing ? null_val : v for v in cdict[key][p.name]] for
p in outputs
)
push!(products, product)
prod_name_to_product[product_name] = product
fixed_output = prod_dict("fixed output (tonne)", 0.0)
var_output = prod_dict("variable output (tonne/tonne)", 0.0)
collection_cost = prod_dict("collection cost (\$/tonne)", 0.0)
disposal_limit = prod_dict("disposal limit (tonne)", Inf)
disposal_cost = prod_dict("disposal cost (\$/tonne)", 0.0)
# Create collection centers (old version)
if "initial amounts" in keys(product_dict)
for (center_name, center_dict) in product_dict["initial amounts"]
if "location" in keys(center_dict)
region = geodb_query(center_dict["location"])
center_dict["latitude (deg)"] = region.centroid.lat
center_dict["longitude (deg)"] = region.centroid.lon
end
center = CollectionCenter(
amount = center_dict["amount (tonne)"],
index = length(collection_centers) + 1,
latitude = center_dict["latitude (deg)"],
longitude = center_dict["longitude (deg)"],
name = center_name,
product = product,
)
push!(prod_centers, center)
push!(collection_centers, center)
end
end
# New-version Center construction (implicit keyword arguments).
center = Center(;
latitude,
longitude,
input,
outputs,
revenue,
operating_cost,
fixed_output,
var_output,
collection_cost,
disposal_cost,
disposal_limit,
)
push!(centers, center)
centers_by_name[cname] = center
end
# Create plants (old version only; the new model has no plants section)
for (plant_name, plant_dict) in json["plants"]
input = prod_name_to_product[plant_dict["input"]]
output = Dict()
# Plant outputs
if "outputs (tonne/tonne)" in keys(plant_dict)
output = Dict(
prod_name_to_product[key] => value for
(key, value) in plant_dict["outputs (tonne/tonne)"] if value > 0
)
end
energy = zeros(T)
emissions = Dict()
if "energy (GJ/tonne)" in keys(plant_dict)
energy = plant_dict["energy (GJ/tonne)"]
end
if "emissions (tonne/tonne)" in keys(plant_dict)
emissions = plant_dict["emissions (tonne/tonne)"]
end
for (location_name, location_dict) in plant_dict["locations"]
sizes = PlantSize[]
disposal_limit = Dict(p => [0.0 for t = 1:T] for p in keys(output))
disposal_cost = Dict(p => [0.0 for t = 1:T] for p in keys(output))
# GeoDB
if "location" in keys(location_dict)
region = geodb_query(location_dict["location"])
location_dict["latitude (deg)"] = region.centroid.lat
location_dict["longitude (deg)"] = region.centroid.lon
end
# Disposal
if "disposal" in keys(location_dict)
for (product_name, disposal_dict) in location_dict["disposal"]
limit = [1e8 for t = 1:T]
if "limit (tonne)" in keys(disposal_dict)
limit = disposal_dict["limit (tonne)"]
end
disposal_limit[prod_name_to_product[product_name]] = limit
disposal_cost[prod_name_to_product[product_name]] =
disposal_dict["cost (\$/tonne)"]
end
end
# Capacities
for (capacity_name, capacity_dict) in location_dict["capacities (tonne)"]
push!(
sizes,
PlantSize(
capacity = Base.parse(Float64, capacity_name),
fixed_operating_cost = capacity_dict["fixed operating cost (\$)"],
opening_cost = capacity_dict["opening cost (\$)"],
variable_operating_cost = capacity_dict["variable operating cost (\$/tonne)"],
),
)
end
length(sizes) > 1 || push!(sizes, deepcopy(sizes[1]))
sort!(sizes, by = x -> x.capacity)
# Initial capacity
initial_capacity = 0
if "initial capacity (tonne)" in keys(location_dict)
initial_capacity = location_dict["initial capacity (tonne)"]
end
# Storage
storage_limit = 0
storage_cost = zeros(T)
if "storage" in keys(location_dict)
storage_dict = location_dict["storage"]
storage_limit = storage_dict["limit (tonne)"]
storage_cost = storage_dict["cost (\$/tonne)"]
end
# Validation: Capacities
if length(sizes) != 2
throw("At most two capacities are supported")
end
if sizes[1].variable_operating_cost != sizes[2].variable_operating_cost
throw("Variable operating costs must be the same for all capacities")
end
plant = Plant(
disposal_cost = disposal_cost,
disposal_limit = disposal_limit,
emissions = emissions,
energy = energy,
index = length(plants) + 1,
initial_capacity = initial_capacity,
input = input,
latitude = location_dict["latitude (deg)"],
location_name = location_name,
longitude = location_dict["longitude (deg)"],
output = output,
plant_name = plant_name,
sizes = sizes,
storage_cost = storage_cost,
storage_limit = storage_limit,
)
push!(plants, plant)
end
end
@info @sprintf("%12d collection centers", length(collection_centers))
@info @sprintf("%12d candidate plant locations", length(plants))
# NOTE(review): seam — two merged Instance constructions: the old
# explicit-keyword call and the new implicit-keyword call. Only one can
# survive reconciliation.
return Instance(
time = T,
products = products,
collection_centers = collection_centers,
plants = plants,
building_period = building_period,
distance_metric = distance_metric,
return Instance(;
time_horizon,
building_period,
distance_metric,
products,
products_by_name,
centers,
centers_by_name,
)
end

View File

@@ -1,73 +1,32 @@
# RELOG: Reverse Logistics Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using OrderedCollections
using DataStructures
using JSON
using JSONSchema
using Printf
using Statistics
# NOTE(review): merged diff — this span interleaves the old mutable
# Product (time-indexed cost/limit vectors) with the new immutable
# Product (name + tr_* fields). Two struct headers, one `end`; not valid
# Julia as written. Reconcile against the original commit.
Base.@kwdef mutable struct Product
acquisition_cost::Vector{Float64}
collection_centers::Vector
disposal_cost::Vector{Float64}
disposal_limit::Vector{Float64}
Base.@kwdef struct Product
name::String
transportation_cost::Vector{Float64}
transportation_emissions::Dict{String,Vector{Float64}}
transportation_energy::Vector{Float64}
tr_cost::Vector{Float64}
tr_energy::Vector{Float64}
tr_emissions::OrderedDict{String,Vector{Float64}}
end
# NOTE(review): merged diff — the old CollectionCenter declaration is
# truncated after two fields and the new Center declaration begins; two
# struct headers share one `end`. Not valid Julia as written.
Base.@kwdef mutable struct CollectionCenter
amount::Vector{Float64}
index::Int64
Base.@kwdef struct Center
latitude::Float64
longitude::Float64
name::String
product::Product
input::Union{Product,Nothing}
outputs::Vector{Product}
fixed_output::OrderedDict{Product,Vector{Float64}}
var_output::OrderedDict{Product,Vector{Float64}}
revenue::Vector{Float64}
collection_cost::OrderedDict{Product,Vector{Float64}}
operating_cost::Vector{Float64}
disposal_limit::OrderedDict{Product,Vector{Float64}}
disposal_cost::OrderedDict{Product,Vector{Float64}}
end
# One capacity option for a plant location. Mutable because `_compress`
# rescales `capacity` in place.
Base.@kwdef mutable struct PlantSize
capacity::Float64                        # processing capacity (tonne)
fixed_operating_cost::Vector{Float64}    # per time period ($)
opening_cost::Vector{Float64}            # per time period ($)
variable_operating_cost::Vector{Float64} # per time period ($/tonne)
end
# A candidate plant location: one input product transformed into output
# products, with per-period costs, emissions, and disposal terms.
Base.@kwdef mutable struct Plant
disposal_cost::Dict{Product,Vector{Float64}}   # per output product, per period
disposal_limit::Dict{Product,Vector{Float64}}  # per output product, per period
emissions::Dict{String,Vector{Float64}}        # emission name => per-period rate
energy::Vector{Float64}                        # per-period energy use
index::Int64                                   # 1-based position in Instance.plants
initial_capacity::Float64
input::Product
latitude::Float64
location_name::String
longitude::Float64
output::Dict{Product,Float64}                  # output product => tonne per input tonne
plant_name::String
sizes::Vector{PlantSize}                       # candidate capacity options
storage_cost::Vector{Float64}                  # per period
storage_limit::Float64
end
# How distances between locations are computed.
abstract type DistanceMetric end
# Driving distance approximated via a k-nearest-neighbors model; `tree`
# and `ratios` are populated lazily (both start as `nothing`).
Base.@kwdef mutable struct KnnDrivingDistance <: DistanceMetric
tree = nothing
ratios = nothing
end
# Straight-line (Euclidean) distance; carries no state.
mutable struct EuclideanDistance <: DistanceMetric end
# NOTE(review): merged diff — the old mutable Instance (plants,
# collection_centers, DistanceMetric, `time`) is interleaved with the new
# immutable Instance (centers, String metric, `time_horizon`). Two struct
# headers, one `end`; not valid Julia as written.
Base.@kwdef mutable struct Instance
building_period::Vector{Int64}
collection_centers::Vector{CollectionCenter}
distance_metric::DistanceMetric
plants::Vector{Plant}
Base.@kwdef struct Instance
building_period::Vector{Int}
centers_by_name::OrderedDict{String,Center}
centers::Vector{Center}
distance_metric::String
products_by_name::OrderedDict{String,Product}
products::Vector{Product}
time::Int64
time_horizon::Int
end

View File

@@ -1,21 +0,0 @@
# RELOG: Reverse Logistics Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using DataStructures
using JSON
using JSONSchema
using Printf
using Statistics
"""
    validate(json, schema)

Check `json` against the JSON `schema`, throwing a descriptive message
when validation fails; returns `nothing` on success.
"""
function validate(json, schema)
    issue = JSONSchema.validate(json, schema)
    issue === nothing && return
    # SingleIssue carries a structured reason/path; anything else is
    # converted to its string form.
    msg = issue isa JSONSchema.SingleIssue ? "$(issue.reason) in $(issue.path)" :
          convert(String, issue)
    throw("Error parsing input file: $(msg)")
end