mirror of https://github.com/ANL-CEEESA/MIPLearn.git
synced 2025-12-07 01:48:51 -06:00
MIPLearn v0.3
@@ -1,3 +1,3 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
+# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
146 miplearn/problems/binpack.py Normal file
@@ -0,0 +1,146 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Optional, Union

import gurobipy as gp
import numpy as np
from gurobipy import GRB, quicksum
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn.io import read_pkl_gz
from miplearn.solvers.gurobi import GurobiModel


@dataclass
class BinPackData:
    """Data for the bin packing problem.

    Parameters
    ----------
    sizes
        Sizes of the items.
    capacity
        Capacity of the bin.
    """

    sizes: np.ndarray
    capacity: int


class BinPackGenerator:
    """Random instance generator for the bin packing problem.

    If `fix_items=False`, the class samples the user-provided probability
    distributions `n`, `sizes` and `capacity` to decide, respectively, the number
    of items, the sizes of the items and the capacity of the bin. All values are
    sampled independently.

    If `fix_items=True`, the class creates a reference instance, using the method
    previously described, then generates additional instances by perturbing its
    item sizes and bin capacity. More specifically, the size of the i-th item is
    set to `s_i * gamma_i`, where `s_i` is the size of the i-th item in the
    reference instance and `gamma_i` is sampled from `sizes_jitter`. Similarly,
    the bin capacity is set to `B * beta`, where `B` is the reference bin capacity
    and `beta` is sampled from `capacity_jitter`. The number of items remains the
    same across all generated instances.

    Parameters
    ----------
    n
        Probability distribution for the number of items.
    sizes
        Probability distribution for the item sizes.
    capacity
        Probability distribution for the bin capacity.
    sizes_jitter
        Probability distribution for the item size randomization.
    capacity_jitter
        Probability distribution for the bin capacity randomization.
    fix_items
        If `True`, generates a reference instance, then applies some perturbation
        to it. If `False`, generates completely different instances.
    """

    def __init__(
        self,
        n: rv_frozen,
        sizes: rv_frozen,
        capacity: rv_frozen,
        sizes_jitter: rv_frozen,
        capacity_jitter: rv_frozen,
        fix_items: bool,
    ) -> None:
        self.n = n
        self.sizes = sizes
        self.capacity = capacity
        self.sizes_jitter = sizes_jitter
        self.capacity_jitter = capacity_jitter
        self.fix_items = fix_items
        self.ref_data: Optional[BinPackData] = None

    def generate(self, n_samples: int) -> List[BinPackData]:
        """Generates random instances.

        Parameters
        ----------
        n_samples
            Number of samples to generate.
        """

        def _sample() -> BinPackData:
            if self.ref_data is None:
                n = self.n.rvs()
                sizes = self.sizes.rvs(n)
                capacity = self.capacity.rvs()
                if self.fix_items:
                    self.ref_data = BinPackData(sizes, capacity)
            else:
                n = self.ref_data.sizes.shape[0]
                sizes = self.ref_data.sizes
                capacity = self.ref_data.capacity

            sizes = sizes * self.sizes_jitter.rvs(n)
            capacity = capacity * self.capacity_jitter.rvs()
            return BinPackData(sizes.round(2), capacity.round(2))

        return [_sample() for _ in range(n_samples)]


def build_binpack_model(data: Union[str, BinPackData]) -> GurobiModel:
    """Converts bin packing problem data into a concrete Gurobipy model."""
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, BinPackData)

    model = gp.Model()
    n = data.sizes.shape[0]

    # Var: Use bin
    y = model.addVars(n, name="y", vtype=GRB.BINARY)

    # Var: Assign item to bin
    x = model.addVars(n, n, name="x", vtype=GRB.BINARY)

    # Obj: Minimize number of bins
    model.setObjective(quicksum(y[i] for i in range(n)))

    # Eq: Enforce bin capacity
    model.addConstrs(
        (
            quicksum(data.sizes[i] * x[i, j] for i in range(n)) <= data.capacity * y[j]
            for j in range(n)
        ),
        name="eq_capacity",
    )

    # Eq: Must assign all items to bins
    model.addConstrs(
        (quicksum(x[i, j] for j in range(n)) == 1 for i in range(n)),
        name="eq_assign",
    )

    model.update()
    return GurobiModel(model)
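A minimal usage sketch for BinPackGenerator and build_binpack_model above; the distributions are illustrative, and the final optimize() call assumes the GurobiModel wrapper exposes one, plus a working Gurobi license:

from scipy.stats import randint, uniform
from miplearn.problems.binpack import BinPackGenerator, build_binpack_model

# Ten items with sizes in [0, 10), bin capacity 100, and roughly +/-10%
# jitter applied to the sizes and capacity of each perturbed instance.
gen = BinPackGenerator(
    n=randint(low=10, high=11),
    sizes=uniform(loc=0, scale=10),
    capacity=uniform(loc=100, scale=0),
    sizes_jitter=uniform(loc=0.9, scale=0.2),
    capacity_jitter=uniform(loc=0.9, scale=0.2),
    fix_items=True,
)
data = gen.generate(10)
model = build_binpack_model(data[0])
model.optimize()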
miplearn/problems/knapsack.py
@@ -1,230 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Dict, Optional

import numpy as np
import pyomo.environ as pe
from overrides import overrides
from scipy.stats import uniform, randint, rv_discrete
from scipy.stats.distributions import rv_frozen

from miplearn.instance.base import Instance


@dataclass
class MultiKnapsackData:
    prices: np.ndarray
    capacities: np.ndarray
    weights: np.ndarray


class MultiKnapsackInstance(Instance):
    """Representation of the Multidimensional 0-1 Knapsack Problem.

    Given a set of n items and m knapsacks, the problem is to find a subset of
    items S maximizing sum(prices[i] for i in S). If selected, each item i
    occupies weights[i,j] units of space in each knapsack j. Furthermore, each
    knapsack j has limited storage space, given by capacities[j].

    This implementation assigns a different category for each decision variable,
    and therefore trains one ML model per variable. It is only suitable when
    training and test instances have the same size and items don't shuffle around.
    """

    def __init__(
        self,
        prices: np.ndarray,
        capacities: np.ndarray,
        weights: np.ndarray,
    ) -> None:
        super().__init__()
        assert isinstance(prices, np.ndarray)
        assert isinstance(capacities, np.ndarray)
        assert isinstance(weights, np.ndarray)
        assert len(weights.shape) == 2
        self.m, self.n = weights.shape
        assert prices.shape == (self.n,)
        assert capacities.shape == (self.m,)
        self.prices = prices
        self.capacities = capacities
        self.weights = weights

    @overrides
    def to_model(self) -> pe.ConcreteModel:
        model = pe.ConcreteModel()
        model.x = pe.Var(range(self.n), domain=pe.Binary)
        model.OBJ = pe.Objective(
            expr=sum(-model.x[j] * self.prices[j] for j in range(self.n)),
            sense=pe.minimize,
        )
        model.eq_capacity = pe.ConstraintList()
        for i in range(self.m):
            model.eq_capacity.add(
                sum(model.x[j] * self.weights[i, j] for j in range(self.n))
                <= self.capacities[i]
            )

        return model


# noinspection PyPep8Naming
class MultiKnapsackGenerator:
    def __init__(
        self,
        n: rv_frozen = randint(low=100, high=101),
        m: rv_frozen = randint(low=30, high=31),
        w: rv_frozen = randint(low=0, high=1000),
        K: rv_frozen = randint(low=500, high=501),
        u: rv_frozen = uniform(loc=0.0, scale=1.0),
        alpha: rv_frozen = uniform(loc=0.25, scale=0.0),
        fix_w: bool = False,
        w_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        p_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        round: bool = True,
    ):
        """Initialize the problem generator.

        Instances have a random number of items (or variables) and a random number
        of knapsacks (or constraints), as specified by the provided probability
        distributions `n` and `m`, respectively. The weight of each item `i` on
        knapsack `j` is sampled independently from the provided distribution `w`.
        The capacity of knapsack `j` is set to:

            alpha_j * sum(w[i,j] for i in range(n)),

        where `alpha_j`, the tightness ratio, is sampled from the provided
        probability distribution `alpha`. To make the instances more challenging,
        the costs of the items are linearly correlated to their average weights.
        More specifically, the price of each item `i` is set to:

            sum(w[i,j]/m for j in range(m)) + K * u_i,

        where `K`, the correlation coefficient, and `u_i`, the correlation
        multiplier, are sampled from the provided probability distributions. Note
        that `K` is only sampled once for the entire instance.

        If `fix_w=True` is provided, then `w[i,j]` are kept the same in all
        generated instances. This also implies that `n` and `m` are kept fixed.
        Although the prices and capacities are derived from `w[i,j]`, as long as
        `u` and `K` are not constants, the generated instances will still not be
        completely identical.

        If a probability distribution `w_jitter` is provided, then item weights
        will be set to `w[i,j] * gamma[i,j]`, where `gamma[i,j]` is sampled from
        `w_jitter`. When combined with `fix_w=True`, this argument may be used to
        generate instances where the weight of each item is roughly the same, but
        not exactly identical, across all instances. The prices of the items and
        the capacities of the knapsacks will be calculated as above, but using
        these perturbed weights instead.

        By default, all generated prices, weights and capacities are rounded to
        the nearest integer. If `round=False` is provided, this rounding will be
        disabled.

        Parameters
        ----------
        n: rv_discrete
            Probability distribution for the number of items (or variables)
        m: rv_discrete
            Probability distribution for the number of knapsacks (or constraints)
        w: rv_continuous
            Probability distribution for the item weights
        K: rv_continuous
            Probability distribution for the profit correlation coefficient
        u: rv_continuous
            Probability distribution for the profit multiplier
        alpha: rv_continuous
            Probability distribution for the tightness ratio
        fix_w: boolean
            If true, weights are kept the same (minus the noise from w_jitter) in
            all instances
        w_jitter: rv_continuous
            Probability distribution for random noise added to the weights
        round: boolean
            If true, all prices, weights and capacities are rounded to the
            nearest integer
        """
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(
            alpha, rv_frozen
        ), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(
            w_jitter, rv_frozen
        ), "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
        self.u = u
        self.K = K
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.p_jitter = p_jitter
        self.round = round
        self.fix_n: Optional[int] = None
        self.fix_m: Optional[int] = None
        self.fix_w: Optional[np.ndarray] = None
        self.fix_u: Optional[np.ndarray] = None
        self.fix_K: Optional[float] = None

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
            self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
            self.fix_u = self.u.rvs(self.fix_n)
            self.fix_K = self.K.rvs()

    def generate(self, n_samples: int) -> List[MultiKnapsackData]:
        def _sample() -> MultiKnapsackData:
            if self.fix_w is not None:
                assert self.fix_m is not None
                assert self.fix_n is not None
                assert self.fix_u is not None
                assert self.fix_K is not None
                n = self.fix_n
                m = self.fix_m
                w = self.fix_w
                u = self.fix_u
                K = self.fix_K
            else:
                n = self.n.rvs()
                m = self.m.rvs()
                w = np.array([self.w.rvs(n) for _ in range(m)])
                u = self.u.rvs(n)
                K = self.K.rvs()
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array(
                [w[:, j].sum() / m + K * u[j] for j in range(n)]
            ) * self.p_jitter.rvs(n)
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackData(p, b, w)

        return [_sample() for _ in range(n_samples)]


def build_multiknapsack_model(data: MultiKnapsackData) -> pe.ConcreteModel:
    model = pe.ConcreteModel()
    m, n = data.weights.shape
    model.x = pe.Var(range(n), domain=pe.Binary)
    model.OBJ = pe.Objective(
        expr=sum(-model.x[j] * data.prices[j] for j in range(n)),
        sense=pe.minimize,
    )
    model.eq_capacity = pe.ConstraintList()
    for i in range(m):
        model.eq_capacity.add(
            sum(model.x[j] * data.weights[i, j] for j in range(n)) <= data.capacities[i]
        )
    return model
189 miplearn/problems/multiknapsack.py Normal file
@@ -0,0 +1,189 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Optional, Union

import gurobipy as gp
import numpy as np
from gurobipy import GRB
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn.io import read_pkl_gz
from miplearn.solvers.gurobi import GurobiModel


@dataclass
class MultiKnapsackData:
    """Data for the multi-dimensional knapsack problem.

    Parameters
    ----------
    prices
        Item prices.
    capacities
        Knapsack capacities.
    weights
        Matrix of item weights.
    """

    prices: np.ndarray
    capacities: np.ndarray
    weights: np.ndarray


# noinspection PyPep8Naming
class MultiKnapsackGenerator:
    """Random instance generator for the multi-dimensional knapsack problem.

    Instances have a random number of items (or variables) and a random number of
    knapsacks (or constraints), as specified by the provided probability
    distributions `n` and `m`, respectively. The weight of each item `i` on
    knapsack `j` is sampled independently from the provided distribution `w`. The
    capacity of knapsack `j` is set to ``alpha_j * sum(w[i,j] for i in range(n))``,
    where `alpha_j`, the tightness ratio, is sampled from the provided probability
    distribution `alpha`.

    To make the instances more challenging, the costs of the items are linearly
    correlated to their average weights. More specifically, the price of each item
    `i` is set to ``sum(w[i,j]/m for j in range(m)) + K * u_i``, where `K`,
    the correlation coefficient, and `u_i`, the correlation multiplier, are
    sampled from the provided probability distributions. Note that `K` is only
    sampled once for the entire instance.

    If `fix_w=True`, then `weights[i,j]` are kept the same in all generated
    instances. This also implies that `n` and `m` are kept fixed. Although the
    prices and capacities are derived from `weights[i,j]`, as long as `u` and `K`
    are not constants, the generated instances will still not be completely
    identical.

    If a probability distribution `w_jitter` is provided, then item weights will
    be set to ``w[i,j] * gamma[i,j]``, where `gamma[i,j]` is sampled from
    `w_jitter`. When combined with `fix_w=True`, this argument may be used to
    generate instances where the weight of each item is roughly the same, but not
    exactly identical, across all instances. The prices of the items and the
    capacities of the knapsacks will be calculated as above, but using these
    perturbed weights instead.

    By default, all generated prices, weights and capacities are rounded to the
    nearest integer. If `round=False` is provided, this rounding will be disabled.

    Parameters
    ----------
    n: rv_discrete
        Probability distribution for the number of items (or variables).
    m: rv_discrete
        Probability distribution for the number of knapsacks (or constraints).
    w: rv_continuous
        Probability distribution for the item weights.
    K: rv_continuous
        Probability distribution for the profit correlation coefficient.
    u: rv_continuous
        Probability distribution for the profit multiplier.
    alpha: rv_continuous
        Probability distribution for the tightness ratio.
    fix_w: boolean
        If true, weights are kept the same (minus the noise from w_jitter) in all
        instances.
    w_jitter: rv_continuous
        Probability distribution for random noise added to the weights.
    round: boolean
        If true, all prices, weights and capacities are rounded to the nearest
        integer.
    """

    def __init__(
        self,
        n: rv_frozen = randint(low=100, high=101),
        m: rv_frozen = randint(low=30, high=31),
        w: rv_frozen = randint(low=0, high=1000),
        K: rv_frozen = randint(low=500, high=501),
        u: rv_frozen = uniform(loc=0.0, scale=1.0),
        alpha: rv_frozen = uniform(loc=0.25, scale=0.0),
        fix_w: bool = False,
        w_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        p_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        round: bool = True,
    ):
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(
            alpha, rv_frozen
        ), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(
            w_jitter, rv_frozen
        ), "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
        self.u = u
        self.K = K
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.p_jitter = p_jitter
        self.round = round
        self.fix_n: Optional[int] = None
        self.fix_m: Optional[int] = None
        self.fix_w: Optional[np.ndarray] = None
        self.fix_u: Optional[np.ndarray] = None
        self.fix_K: Optional[float] = None

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
            self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
            self.fix_u = self.u.rvs(self.fix_n)
            self.fix_K = self.K.rvs()

    def generate(self, n_samples: int) -> List[MultiKnapsackData]:
        def _sample() -> MultiKnapsackData:
            if self.fix_w is not None:
                assert self.fix_m is not None
                assert self.fix_n is not None
                assert self.fix_u is not None
                assert self.fix_K is not None
                n = self.fix_n
                m = self.fix_m
                w = self.fix_w
                u = self.fix_u
                K = self.fix_K
            else:
                n = self.n.rvs()
                m = self.m.rvs()
                w = np.array([self.w.rvs(n) for _ in range(m)])
                u = self.u.rvs(n)
                K = self.K.rvs()
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array(
                [w[:, j].sum() / m + K * u[j] for j in range(n)]
            ) * self.p_jitter.rvs(n)
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackData(p, b, w)

        return [_sample() for _ in range(n_samples)]


def build_multiknapsack_model(data: Union[str, MultiKnapsackData]) -> GurobiModel:
    """Converts multi-knapsack problem data into a concrete Gurobipy model."""
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, MultiKnapsackData)

    model = gp.Model()
    m, n = data.weights.shape
    x = model.addMVar(n, vtype=GRB.BINARY, name="x")
    model.addConstr(data.weights @ x <= data.capacities)
    model.setObjective(-data.prices @ x)
    model.update()
    return GurobiModel(model)
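A short usage sketch for the generator and builder above; the jitter value is illustrative, and the remaining parameters keep the defaults shown in the signature:

from scipy.stats import uniform
from miplearn.problems.multiknapsack import (
    MultiKnapsackGenerator,
    build_multiknapsack_model,
)

# Keep the weight matrix fixed across instances (fix_w=True) while applying
# roughly +/-5% multiplicative noise to each weight; prices and capacities
# are then recomputed from the perturbed weights, as the docstring describes.
gen = MultiKnapsackGenerator(
    fix_w=True,
    w_jitter=uniform(loc=0.95, scale=0.1),
)
data = gen.generate(10)
model = build_multiknapsack_model(data[0])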
185 miplearn/problems/pmedian.py Normal file
@@ -0,0 +1,185 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Optional, Union

import gurobipy as gp
import numpy as np
from gurobipy import quicksum, GRB
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn.io import read_pkl_gz
from miplearn.solvers.gurobi import GurobiModel


@dataclass
class PMedianData:
    """Data for the capacitated p-median problem.

    Parameters
    ----------
    distances
        Matrix of distances between customer i and facility j.
    demands
        Customer demands.
    p
        Number of medians that need to be chosen.
    capacities
        Facility capacities.
    """

    distances: np.ndarray
    demands: np.ndarray
    p: int
    capacities: np.ndarray


class PMedianGenerator:
    """Random generator for the capacitated p-median problem.

    This class first decides the number of customers and the parameter `p` by
    sampling the provided `n` and `p` distributions, respectively. Then, for each
    customer `i`, the class builds its geographical location `(xi, yi)` by
    sampling the provided `x` and `y` distributions. For each `i`, the demand of
    customer `i` and the capacity of facility `i` are decided by sampling the
    distributions `demands` and `capacities`, respectively. Finally, the costs
    `w[i,j]` are set to the Euclidean distance between the locations of customers
    `i` and `j`.

    If `fixed=True`, then the number of customers, their locations, the parameter
    `p`, the demands and the capacities are only sampled from their respective
    distributions exactly once, to build a reference instance which is then
    perturbed. Specifically, for each perturbation, the distances, demands and
    capacities are multiplied by factors sampled from the distributions
    `distances_jitter`, `demands_jitter` and `capacities_jitter`, respectively.
    The result is a list of instances that have the same set of customers, but
    slightly different demands, capacities and distances.

    Parameters
    ----------
    x
        Probability distribution for the x-coordinate of the points.
    y
        Probability distribution for the y-coordinate of the points.
    n
        Probability distribution for the number of customers.
    p
        Probability distribution for the number of medians.
    demands
        Probability distribution for the customer demands.
    capacities
        Probability distribution for the facility capacities.
    distances_jitter
        Probability distribution for the random scaling factor applied to
        distances.
    demands_jitter
        Probability distribution for the random scaling factor applied to demands.
    capacities_jitter
        Probability distribution for the random scaling factor applied to
        capacities.
    fixed
        If `True`, then customers are kept the same across instances.
    """

    def __init__(
        self,
        x: rv_frozen = uniform(loc=0.0, scale=100.0),
        y: rv_frozen = uniform(loc=0.0, scale=100.0),
        n: rv_frozen = randint(low=100, high=101),
        p: rv_frozen = randint(low=10, high=11),
        demands: rv_frozen = uniform(loc=0, scale=20),
        capacities: rv_frozen = uniform(loc=0, scale=100),
        distances_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        demands_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        capacities_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
        fixed: bool = True,
    ):
        self.x = x
        self.y = y
        self.n = n
        self.p = p
        self.demands = demands
        self.capacities = capacities
        self.distances_jitter = distances_jitter
        self.demands_jitter = demands_jitter
        self.capacities_jitter = capacities_jitter
        self.fixed = fixed
        self.ref_data: Optional[PMedianData] = None

    def generate(self, n_samples: int) -> List[PMedianData]:
        def _sample() -> PMedianData:
            if self.ref_data is None:
                n = self.n.rvs()
                p = self.p.rvs()
                loc = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
                distances = squareform(pdist(loc))
                demands = self.demands.rvs(n)
                capacities = self.capacities.rvs(n)
            else:
                n = self.ref_data.demands.shape[0]
                distances = self.ref_data.distances * self.distances_jitter.rvs(
                    size=(n, n)
                )
                # Keep the perturbed distance matrix symmetric
                distances = np.tril(distances) + np.triu(distances.T, 1)
                demands = self.ref_data.demands * self.demands_jitter.rvs(n)
                capacities = self.ref_data.capacities * self.capacities_jitter.rvs(n)
                p = self.ref_data.p

            data = PMedianData(
                distances=distances.round(2),
                demands=demands.round(2),
                p=p,
                capacities=capacities.round(2),
            )

            if self.fixed and self.ref_data is None:
                self.ref_data = data

            return data

        return [_sample() for _ in range(n_samples)]


def build_pmedian_model(data: Union[str, PMedianData]) -> GurobiModel:
    """Converts capacitated p-median data into a concrete Gurobipy model."""
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, PMedianData)

    model = gp.Model()
    n = len(data.demands)

    # Decision variables
    x = model.addVars(n, n, vtype=GRB.BINARY, name="x")
    y = model.addVars(n, vtype=GRB.BINARY, name="y")

    # Objective function
    model.setObjective(
        quicksum(data.distances[i, j] * x[i, j] for i in range(n) for j in range(n))
    )

    # Eq: Must serve each customer
    model.addConstrs(
        (quicksum(x[i, j] for j in range(n)) == 1 for i in range(n)),
        name="eq_demand",
    )

    # Eq: Must choose p medians
    model.addConstr(
        quicksum(y[j] for j in range(n)) == data.p,
        name="eq_choose",
    )

    # Eq: Must not exceed capacity
    model.addConstrs(
        (
            quicksum(data.demands[i] * x[i, j] for i in range(n))
            <= data.capacities[j] * y[j]
            for j in range(n)
        ),
        name="eq_capacity",
    )

    model.update()
    return GurobiModel(model)
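A short usage sketch with illustrative sizes; the remaining distributions keep their defaults, fixed=True makes all instances perturbations of one reference instance, and optimize() is assumed available on the returned wrapper:

from scipy.stats import randint
from miplearn.problems.pmedian import PMedianGenerator, build_pmedian_model

gen = PMedianGenerator(
    n=randint(low=25, high=26),  # 25 customers
    p=randint(low=5, high=6),    # choose 5 medians
)
data = gen.generate(10)
model = build_pmedian_model(data[0])
model.optimize()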
120 miplearn/problems/setcover.py Normal file
@@ -0,0 +1,120 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Union

import gurobipy as gp
import numpy as np
import pyomo.environ as pe
from gurobipy import GRB
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn.io import read_pkl_gz
from miplearn.solvers.gurobi import GurobiModel
from miplearn.solvers.pyomo import PyomoModel


@dataclass
class SetCoverData:
    costs: np.ndarray
    incidence_matrix: np.ndarray


class SetCoverGenerator:
    def __init__(
        self,
        n_elements: rv_frozen = randint(low=50, high=51),
        n_sets: rv_frozen = randint(low=100, high=101),
        costs: rv_frozen = uniform(loc=0.0, scale=100.0),
        costs_jitter: rv_frozen = uniform(loc=-5.0, scale=10.0),
        K: rv_frozen = uniform(loc=25.0, scale=0.0),
        density: rv_frozen = uniform(loc=0.02, scale=0.00),
        fix_sets: bool = True,
    ):
        self.n_elements = n_elements
        self.n_sets = n_sets
        self.costs = costs
        self.costs_jitter = costs_jitter
        self.density = density
        self.K = K
        self.fix_sets = fix_sets
        self.fixed_costs = None
        self.fixed_matrix = None

    def generate(self, n_samples: int) -> List[SetCoverData]:
        def _sample() -> SetCoverData:
            if self.fixed_matrix is None:
                n_sets = self.n_sets.rvs()
                n_elements = self.n_elements.rvs()
                density = self.density.rvs()

                incidence_matrix = np.random.rand(n_elements, n_sets) < density
                incidence_matrix = incidence_matrix.astype(int)

                # Ensure each element belongs to at least one set
                for j in range(n_elements):
                    if incidence_matrix[j, :].sum() == 0:
                        incidence_matrix[j, randint(low=0, high=n_sets).rvs()] = 1

                # Ensure each set contains at least one element
                for i in range(n_sets):
                    if incidence_matrix[:, i].sum() == 0:
                        incidence_matrix[randint(low=0, high=n_elements).rvs(), i] = 1

                costs = self.costs.rvs(n_sets) + self.K.rvs() * incidence_matrix.sum(
                    axis=0
                )
                if self.fix_sets:
                    self.fixed_matrix = incidence_matrix
                    self.fixed_costs = costs
            else:
                incidence_matrix = self.fixed_matrix
                (_, n_sets) = incidence_matrix.shape
                costs = self.fixed_costs * self.costs_jitter.rvs(n_sets)
            return SetCoverData(
                costs=costs.round(2),
                incidence_matrix=incidence_matrix,
            )

        return [_sample() for _ in range(n_samples)]


def build_setcover_model_gurobipy(data: Union[str, SetCoverData]) -> GurobiModel:
    data = _read_setcover_data(data)
    (n_elements, n_sets) = data.incidence_matrix.shape
    model = gp.Model()
    x = model.addMVar(n_sets, vtype=GRB.BINARY, name="x")
    model.addConstr(data.incidence_matrix @ x >= np.ones(n_elements), name="eqs")
    model.setObjective(data.costs @ x)
    model.update()
    return GurobiModel(model)


def build_setcover_model_pyomo(
    data: Union[str, SetCoverData],
    solver="gurobi_persistent",
) -> PyomoModel:
    data = _read_setcover_data(data)
    (n_elements, n_sets) = data.incidence_matrix.shape
    model = pe.ConcreteModel()
    model.sets = pe.Set(initialize=range(n_sets))
    model.x = pe.Var(model.sets, domain=pe.Boolean, name="x")
    model.eqs = pe.Constraint(pe.Any)
    for i in range(n_elements):
        model.eqs[i] = (
            sum(data.incidence_matrix[i, j] * model.x[j] for j in range(n_sets)) >= 1
        )
    model.obj = pe.Objective(
        expr=sum(data.costs[j] * model.x[j] for j in range(n_sets))
    )
    return PyomoModel(model, solver)


def _read_setcover_data(data: Union[str, SetCoverData]) -> SetCoverData:
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, SetCoverData)
    return data
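A brief usage sketch for the generator and the two builders above; the Pyomo variant is shown with its default persistent solver:

from miplearn.problems.setcover import (
    SetCoverGenerator,
    build_setcover_model_gurobipy,
    build_setcover_model_pyomo,
)

# Defaults: 50 elements, 100 sets, ~2% density; with fix_sets=True the
# incidence matrix is frozen and only the set costs vary across instances.
data = SetCoverGenerator().generate(10)
model_grb = build_setcover_model_gurobipy(data[0])
model_pyo = build_setcover_model_pyomo(data[0], solver="gurobi_persistent")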
66 miplearn/problems/setpack.py Normal file
@@ -0,0 +1,66 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Union

import gurobipy as gp
import numpy as np
from gurobipy import GRB
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from .setcover import SetCoverGenerator
from miplearn.solvers.gurobi import GurobiModel
from ..io import read_pkl_gz


@dataclass
class SetPackData:
    costs: np.ndarray
    incidence_matrix: np.ndarray


class SetPackGenerator:
    def __init__(
        self,
        n_elements: rv_frozen = randint(low=50, high=51),
        n_sets: rv_frozen = randint(low=100, high=101),
        costs: rv_frozen = uniform(loc=0.0, scale=100.0),
        costs_jitter: rv_frozen = uniform(loc=-5.0, scale=10.0),
        K: rv_frozen = uniform(loc=25.0, scale=0.0),
        density: rv_frozen = uniform(loc=0.02, scale=0.00),
        fix_sets: bool = True,
    ) -> None:
        self.gen = SetCoverGenerator(
            n_elements=n_elements,
            n_sets=n_sets,
            costs=costs,
            costs_jitter=costs_jitter,
            K=K,
            density=density,
            fix_sets=fix_sets,
        )

    def generate(self, n_samples: int) -> List[SetPackData]:
        return [
            SetPackData(
                s.costs,
                s.incidence_matrix,
            )
            for s in self.gen.generate(n_samples)
        ]


def build_setpack_model(data: Union[str, SetPackData]) -> GurobiModel:
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, SetPackData)
    (n_elements, n_sets) = data.incidence_matrix.shape
    model = gp.Model()
    x = model.addMVar(n_sets, vtype=GRB.BINARY, name="x")
    model.addConstr(data.incidence_matrix @ x <= np.ones(n_elements))
    model.setObjective(-data.costs @ x)
    model.update()
    return GurobiModel(model)
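A sketch of the file-based path accepted by build_setpack_model above. The writer function write_pkl_gz and its output naming pattern are assumptions about miplearn.io, not shown in this diff:

from miplearn.io import write_pkl_gz  # assumed companion to read_pkl_gz
from miplearn.problems.setpack import SetPackGenerator, build_setpack_model

data = SetPackGenerator().generate(10)

# Assumption: write_pkl_gz pickles and gzips each instance into the given
# directory, producing files such as data/setpack/00000.pkl.gz.
write_pkl_gz(data, "data/setpack")
model = build_setpack_model("data/setpack/00000.pkl.gz")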
miplearn/problems/stab.py
@@ -1,19 +1,22 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
+# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from dataclasses import dataclass
-from typing import List
+from typing import List, Union
 
+import gurobipy as gp
 import networkx as nx
 import numpy as np
 import pyomo.environ as pe
+from gurobipy import GRB, quicksum
 from networkx import Graph
-from overrides import overrides
 from scipy.stats import uniform, randint
 from scipy.stats.distributions import rv_frozen
 
-from miplearn.instance.base import Instance
+from miplearn.io import read_pkl_gz
+from miplearn.solvers.gurobi import GurobiModel
+from miplearn.solvers.pyomo import PyomoModel
 
 
 @dataclass
@@ -22,36 +25,6 @@ class MaxWeightStableSetData:
     weights: np.ndarray
 
 
-class MaxWeightStableSetInstance(Instance):
-    """An instance of the Maximum-Weight Stable Set Problem.
-
-    Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks
-    for a stable set S of G maximizing sum(w_v for v in S). A stable set (also
-    called independent set) is a subset of vertices, no two of which are adjacent.
-
-    This is one of Karp's 21 NP-complete problems.
-    """
-
-    def __init__(self, graph: Graph, weights: np.ndarray) -> None:
-        super().__init__()
-        self.graph = graph
-        self.weights = weights
-        self.nodes = list(self.graph.nodes)
-
-    @overrides
-    def to_model(self) -> pe.ConcreteModel:
-        model = pe.ConcreteModel()
-        model.x = pe.Var(self.nodes, domain=pe.Binary)
-        model.OBJ = pe.Objective(
-            expr=sum(model.x[v] * self.weights[v] for v in self.nodes),
-            sense=pe.maximize,
-        )
-        model.clique_eqs = pe.ConstraintList()
-        for clique in nx.find_cliques(self.graph):
-            model.clique_eqs.add(sum(model.x[v] for v in clique) <= 1)
-        return model
-
-
 class MaxWeightStableSetGenerator:
     """Random instance generator for the Maximum-Weight Stable Set Problem.
 
@@ -100,7 +73,7 @@ class MaxWeightStableSetGenerator:
                 graph = self.graph
             else:
                 graph = self._generate_graph()
-            weights = self.w.rvs(graph.number_of_nodes())
+            weights = np.round(self.w.rvs(graph.number_of_nodes()), 2)
             return MaxWeightStableSetData(graph, weights)
 
         return [_sample() for _ in range(n_samples)]
@@ -109,15 +82,35 @@ class MaxWeightStableSetGenerator:
         return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())
 
 
-def build_stab_model(data: MaxWeightStableSetData) -> pe.ConcreteModel:
-    model = pe.ConcreteModel()
-    nodes = list(data.graph.nodes)
-    model.x = pe.Var(nodes, domain=pe.Binary)
-    model.OBJ = pe.Objective(
-        expr=sum(-model.x[v] * data.weights[v] for v in nodes),
-        sense=pe.minimize,
-    )
-    model.clique_eqs = pe.ConstraintList()
-    for clique in nx.find_cliques(data.graph):
-        model.clique_eqs.add(sum(model.x[v] for v in clique) <= 1)
-    return model
+def build_stab_model_gurobipy(data: MaxWeightStableSetData) -> GurobiModel:
+    data = _read_stab_data(data)
+    model = gp.Model()
+    nodes = list(data.graph.nodes)
+    x = model.addVars(nodes, vtype=GRB.BINARY, name="x")
+    model.setObjective(quicksum(-data.weights[i] * x[i] for i in nodes))
+    for clique in nx.find_cliques(data.graph):
+        model.addConstr(quicksum(x[i] for i in clique) <= 1)
+    model.update()
+    return GurobiModel(model)
+
+
+def build_stab_model_pyomo(
+    data: MaxWeightStableSetData,
+    solver="gurobi_persistent",
+) -> PyomoModel:
+    data = _read_stab_data(data)
+    model = pe.ConcreteModel()
+    nodes = pe.Set(initialize=list(data.graph.nodes))
+    model.x = pe.Var(nodes, domain=pe.Boolean, name="x")
+    model.obj = pe.Objective(expr=sum([-data.weights[i] * model.x[i] for i in nodes]))
+    model.clique_eqs = pe.ConstraintList()
+    for clique in nx.find_cliques(data.graph):
+        model.clique_eqs.add(expr=sum(model.x[i] for i in clique) <= 1)
+    return PyomoModel(model, solver)
+
+
+def _read_stab_data(data: Union[str, MaxWeightStableSetData]) -> MaxWeightStableSetData:
+    if isinstance(data, str):
+        data = read_pkl_gz(data)
+    assert isinstance(data, MaxWeightStableSetData)
+    return data
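A usage sketch contrasting the two builders introduced above; the generator's keyword names are assumed to match its attributes (w, n, p, fix_graph):

from scipy.stats import randint, uniform
from miplearn.problems.stab import (
    MaxWeightStableSetGenerator,
    build_stab_model_gurobipy,
    build_stab_model_pyomo,
)

# Erdos-Renyi graphs: 50 nodes, 5% edge probability, weights near 10.
gen = MaxWeightStableSetGenerator(
    w=uniform(loc=10.0, scale=1.0),
    n=randint(low=50, high=51),
    p=uniform(loc=0.05, scale=0.0),
    fix_graph=True,
)
data = gen.generate(10)

# The same data can be solved through either modeling layer.
model_grb = build_stab_model_gurobipy(data[0])
model_pyo = build_stab_model_pyomo(data[0], solver="gurobi_persistent")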
miplearn/problems/tsp.py
@@ -1,22 +1,23 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
+# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from dataclasses import dataclass
-from typing import List, Tuple, Any, Optional, Dict
+from typing import List, Tuple, Optional, Any, Union
 
+import gurobipy as gp
 import networkx as nx
 import numpy as np
-import pyomo.environ as pe
-from overrides import overrides
+from gurobipy import quicksum, GRB, tuplelist
 from scipy.spatial.distance import pdist, squareform
 from scipy.stats import uniform, randint
 from scipy.stats.distributions import rv_frozen
 import logging
 
-from miplearn.instance.base import Instance
-from miplearn.solvers.learning import InternalSolver
-from miplearn.solvers.pyomo.base import BasePyomoSolver
-from miplearn.types import ConstraintName
+from miplearn.io import read_pkl_gz
+from miplearn.solvers.gurobi import GurobiModel
+
+logger = logging.getLogger(__name__)
 
 
 @dataclass
@@ -25,80 +26,6 @@ class TravelingSalesmanData:
     distances: np.ndarray
 
 
-class TravelingSalesmanInstance(Instance):
-    """An instance of the Traveling Salesman Problem.
-
-    Given a list of cities and the distance between each pair of cities, the
-    problem asks for the shortest route starting at the first city, visiting each
-    other city exactly once, then returning to the first city. This problem is a
-    generalization of the Hamiltonian path problem, one of Karp's 21 NP-complete
-    problems.
-    """
-
-    def __init__(self, n_cities: int, distances: np.ndarray) -> None:
-        super().__init__()
-        assert isinstance(distances, np.ndarray)
-        assert distances.shape == (n_cities, n_cities)
-        self.n_cities = n_cities
-        self.distances = distances
-        self.edges = [
-            (i, j) for i in range(self.n_cities) for j in range(i + 1, self.n_cities)
-        ]
-
-    @overrides
-    def to_model(self) -> pe.ConcreteModel:
-        model = pe.ConcreteModel()
-        model.x = pe.Var(self.edges, domain=pe.Binary)
-        model.obj = pe.Objective(
-            expr=sum(model.x[i, j] * self.distances[i, j] for (i, j) in self.edges),
-            sense=pe.minimize,
-        )
-        model.eq_degree = pe.ConstraintList()
-        model.eq_subtour = pe.ConstraintList()
-        for i in range(self.n_cities):
-            model.eq_degree.add(
-                sum(
-                    model.x[min(i, j), max(i, j)]
-                    for j in range(self.n_cities)
-                    if i != j
-                )
-                == 2
-            )
-        return model
-
-    @overrides
-    def find_violated_lazy_constraints(
-        self,
-        solver: InternalSolver,
-        model: Any,
-    ) -> Dict[ConstraintName, List]:
-        selected_edges = [e for e in self.edges if model.x[e].value > 0.5]
-        graph = nx.Graph()
-        graph.add_edges_from(selected_edges)
-        violations = {}
-        for c in list(nx.connected_components(graph)):
-            if len(c) < self.n_cities:
-                cname = ("st[" + ",".join(map(str, c)) + "]").encode()
-                violations[cname] = list(c)
-        return violations
-
-    @overrides
-    def enforce_lazy_constraint(
-        self,
-        solver: InternalSolver,
-        model: Any,
-        component: List,
-    ) -> None:
-        assert isinstance(solver, BasePyomoSolver)
-        cut_edges = [
-            e
-            for e in self.edges
-            if (e[0] in component and e[1] not in component)
-            or (e[0] not in component and e[1] in component)
-        ]
-        constr = model.eq_subtour.add(expr=sum(model.x[e] for e in cut_edges) >= 2)
-        solver.add_constraint(constr)
-
-
 class TravelingSalesmanGenerator:
     """Random generator for the Traveling Salesman Problem."""
 
@@ -118,7 +45,7 @@ class TravelingSalesmanGenerator:
     distributions `n`, `x` and `y`. For each (unordered) pair of cities (i,j),
     the distance d[i,j] between them is set to:
 
-        d[i,j] = gamma[i,j] \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
+        d[i,j] = gamma[i,j] \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
 
     where gamma is sampled from the provided probability distribution `gamma`.
 
@@ -183,3 +110,68 @@ class TravelingSalesmanGenerator:
         n = self.n.rvs()
         cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
         return n, cities
+
+
+def build_tsp_model(data: Union[str, TravelingSalesmanData]) -> GurobiModel:
+    if isinstance(data, str):
+        data = read_pkl_gz(data)
+    assert isinstance(data, TravelingSalesmanData)
+
+    edges = tuplelist(
+        (i, j) for i in range(data.n_cities) for j in range(i + 1, data.n_cities)
+    )
+    model = gp.Model()
+
+    # Decision variables
+    x = model.addVars(edges, vtype=GRB.BINARY, name="x")
+
+    model._x = x
+    model._edges = edges
+    model._n_cities = data.n_cities
+
+    # Objective function
+    model.setObjective(quicksum(x[(i, j)] * data.distances[i, j] for (i, j) in edges))
+
+    # Eq: Must choose two edges adjacent to each node
+    model.addConstrs(
+        (
+            quicksum(x[min(i, j), max(i, j)] for j in range(data.n_cities) if i != j)
+            == 2
+            for i in range(data.n_cities)
+        ),
+        name="eq_degree",
+    )
+
+    def find_violations(model: GurobiModel) -> List[Any]:
+        violations = []
+        x = model.inner.cbGetSolution(model.inner._x)
+        selected_edges = [e for e in model.inner._edges if x[e] > 0.5]
+        graph = nx.Graph()
+        graph.add_edges_from(selected_edges)
+        for component in list(nx.connected_components(graph)):
+            if len(component) < model.inner._n_cities:
+                cut_edges = [
+                    e
+                    for e in model.inner._edges
+                    if (e[0] in component and e[1] not in component)
+                    or (e[0] not in component and e[1] in component)
+                ]
+                violations.append(cut_edges)
+        return violations
+
+    def fix_violations(model: GurobiModel, violations: List[Any], where: str) -> None:
+        for violation in violations:
+            constr = quicksum(model.inner._x[e[0], e[1]] for e in violation) >= 2
+            if where == "cb":
+                model.inner.cbLazy(constr)
+            else:
+                model.inner.addConstr(constr)
+        logger.info(f"tsp: added {len(violations)} subtour elimination constraints")
+
+    model.update()
+
+    return GurobiModel(
+        model,
+        find_violations=find_violations,
+        fix_violations=fix_violations,
+    )
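A usage sketch for the lazy-constraint workflow above. The generator keywords (x, y, n, gamma, fix_cities) are assumed from its docstring, and optimize() is assumed to run find_violations/fix_violations as solver callbacks:

from scipy.stats import randint, uniform
from miplearn.problems.tsp import TravelingSalesmanGenerator, build_tsp_model

gen = TravelingSalesmanGenerator(
    n=randint(low=20, high=21),          # 20 cities
    x=uniform(loc=0.0, scale=1000.0),
    y=uniform(loc=0.0, scale=1000.0),
    gamma=uniform(loc=0.95, scale=0.1),  # ~+/-5% noise on distances
    fix_cities=True,
)
data = gen.generate(10)

# Subtour elimination constraints are separated on the fly: find_violations
# detects disconnected tours and fix_violations adds the corresponding cuts.
model = build_tsp_model(data[0])
model.optimize()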
201 miplearn/problems/uc.py Normal file
@@ -0,0 +1,201 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from math import pi
from typing import List, Optional, Union

import gurobipy as gp
import numpy as np
from gurobipy import GRB, quicksum
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn.io import read_pkl_gz
from miplearn.solvers.gurobi import GurobiModel


@dataclass
class UnitCommitmentData:
    demand: np.ndarray
    min_power: np.ndarray
    max_power: np.ndarray
    min_uptime: np.ndarray
    min_downtime: np.ndarray
    cost_startup: np.ndarray
    cost_prod: np.ndarray
    cost_fixed: np.ndarray


class UnitCommitmentGenerator:
    def __init__(
        self,
        n_units: rv_frozen = randint(low=1_000, high=1_001),
        n_periods: rv_frozen = randint(low=72, high=73),
        max_power: rv_frozen = uniform(loc=50, scale=450),
        min_power: rv_frozen = uniform(loc=0.5, scale=0.25),
        cost_startup: rv_frozen = uniform(loc=0, scale=10_000),
        cost_prod: rv_frozen = uniform(loc=0, scale=50),
        cost_fixed: rv_frozen = uniform(loc=0, scale=1_000),
        min_uptime: rv_frozen = randint(low=2, high=8),
        min_downtime: rv_frozen = randint(low=2, high=8),
        cost_jitter: rv_frozen = uniform(loc=0.75, scale=0.5),
        demand_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
        fix_units: bool = False,
    ) -> None:
        self.n_units = n_units
        self.n_periods = n_periods
        self.max_power = max_power
        self.min_power = min_power
        self.cost_startup = cost_startup
        self.cost_prod = cost_prod
        self.cost_fixed = cost_fixed
        self.min_uptime = min_uptime
        self.min_downtime = min_downtime
        self.cost_jitter = cost_jitter
        self.demand_jitter = demand_jitter
        self.fix_units = fix_units
        self.ref_data: Optional[UnitCommitmentData] = None

    def generate(self, n_samples: int) -> List[UnitCommitmentData]:
        def _sample() -> UnitCommitmentData:
            if self.ref_data is None:
                T = self.n_periods.rvs()
                G = self.n_units.rvs()

                # Generate unit parameters
                max_power = self.max_power.rvs(G)
                min_power = max_power * self.min_power.rvs(G)
                min_uptime = self.min_uptime.rvs(G)
                min_downtime = self.min_downtime.rvs(G)
                cost_startup = self.cost_startup.rvs(G)
                cost_prod = self.cost_prod.rvs(G)
                cost_fixed = self.cost_fixed.rvs(G)
                capacity = max_power.sum()

                # Generate periodic demand in the range [0.4, 0.8] * capacity,
                # with a peak every 12 hours.
                demand = np.sin([i / 6 * pi for i in range(T)])
                demand *= uniform(loc=0, scale=1).rvs(T)
                demand -= demand.min()
                demand /= demand.max() / 0.4
                demand += 0.4
                demand *= capacity
            else:
                T, G = len(self.ref_data.demand), len(self.ref_data.max_power)
                demand = self.ref_data.demand * self.demand_jitter.rvs(T)
                min_power = self.ref_data.min_power
                max_power = self.ref_data.max_power
                min_uptime = self.ref_data.min_uptime
                min_downtime = self.ref_data.min_downtime
                cost_startup = self.ref_data.cost_startup * self.cost_jitter.rvs(G)
                cost_prod = self.ref_data.cost_prod * self.cost_jitter.rvs(G)
                cost_fixed = self.ref_data.cost_fixed * self.cost_jitter.rvs(G)

            data = UnitCommitmentData(
                demand.round(2),
                min_power.round(2),
                max_power.round(2),
                min_uptime,
                min_downtime,
                cost_startup.round(2),
                cost_prod.round(2),
                cost_fixed.round(2),
            )

            if self.ref_data is None and self.fix_units:
                self.ref_data = data

            return data

        return [_sample() for _ in range(n_samples)]


def build_uc_model(data: Union[str, UnitCommitmentData]) -> GurobiModel:
    """
    Models the unit commitment problem according to equations (1)-(5) of:

        Bendotti, P., Fouilhoux, P. & Rottner, C. The min-up/min-down unit
        commitment polytope. J Comb Optim 36, 1024-1058 (2018).
        https://doi.org/10.1007/s10878-018-0273-y
    """
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, UnitCommitmentData)

    T = len(data.demand)
    G = len(data.min_power)
    D = data.demand
    Pmin, Pmax = data.min_power, data.max_power
    L = data.min_uptime
    l = data.min_downtime

    model = gp.Model()
    is_on = model.addVars(G, T, vtype=GRB.BINARY, name="is_on")
    switch_on = model.addVars(G, T, vtype=GRB.BINARY, name="switch_on")
    prod = model.addVars(G, T, name="prod")

    # Objective function
    model.setObjective(
        quicksum(
            is_on[g, t] * data.cost_fixed[g]
            + switch_on[g, t] * data.cost_startup[g]
            + prod[g, t] * data.cost_prod[g]
            for g in range(G)
            for t in range(T)
        )
    )

    # Eq 1: Minimum up-time constraint: If unit g is down at time t, then it
    # cannot have started up during the previous L[g] periods.
    model.addConstrs(
        (
            quicksum(switch_on[g, k] for k in range(t - L[g] + 1, t + 1)) <= is_on[g, t]
            for g in range(G)
            for t in range(L[g] - 1, T)
        ),
        name="eq_min_uptime",
    )

    # Eq 2: Minimum down-time constraint: Symmetric to the minimum up-time
    # constraint.
    model.addConstrs(
        (
            quicksum(switch_on[g, k] for k in range(t - l[g] + 1, t + 1))
            <= 1 - is_on[g, t - l[g] + 1]
            for g in range(G)
            for t in range(l[g] - 1, T)
        ),
        name="eq_min_downtime",
    )

    # Eq 3: Ensures that if unit g starts up at time t, then the start-up
    # variable must be one.
    model.addConstrs(
        (
            switch_on[g, t] >= is_on[g, t] - is_on[g, t - 1]
            for g in range(G)
            for t in range(1, T)
        ),
        name="eq_startup",
    )

    # Eq 4: Ensures that demand is satisfied at each time period.
    model.addConstrs(
        (quicksum(prod[g, t] for g in range(G)) >= D[t] for t in range(T)),
        name="eq_demand",
    )

    # Eq 5: Sets the bounds on the quantity of power produced by each unit.
    model.addConstrs(
        (Pmin[g] * is_on[g, t] <= prod[g, t] for g in range(G) for t in range(T)),
        name="eq_prod_lb",
    )
    model.addConstrs(
        (prod[g, t] <= Pmax[g] * is_on[g, t] for g in range(G) for t in range(T)),
        name="eq_prod_ub",
    )
    model.update()

    return GurobiModel(model)
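A small illustrative configuration of the generator above; optimize() on the returned wrapper is assumed from MIPLearn's solver interface:

from scipy.stats import randint
from miplearn.problems.uc import UnitCommitmentGenerator, build_uc_model

gen = UnitCommitmentGenerator(
    n_units=randint(low=50, high=51),    # 50 generating units
    n_periods=randint(low=24, high=25),  # 24 time periods
    fix_units=True,                      # perturb one reference system
)
data = gen.generate(10)
model = build_uc_model(data[0])
model.optimize()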
54 miplearn/problems/vertexcover.py Normal file
@@ -0,0 +1,54 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from dataclasses import dataclass
from typing import List, Union

import gurobipy as gp
import numpy as np
from gurobipy import GRB, quicksum
from networkx import Graph
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from .stab import MaxWeightStableSetGenerator
from miplearn.solvers.gurobi import GurobiModel
from ..io import read_pkl_gz


@dataclass
class MinWeightVertexCoverData:
    graph: Graph
    weights: np.ndarray


class MinWeightVertexCoverGenerator:
    def __init__(
        self,
        w: rv_frozen = uniform(loc=10.0, scale=1.0),
        n: rv_frozen = randint(low=250, high=251),
        p: rv_frozen = uniform(loc=0.05, scale=0.0),
        fix_graph: bool = True,
    ):
        self._generator = MaxWeightStableSetGenerator(w, n, p, fix_graph)

    def generate(self, n_samples: int) -> List[MinWeightVertexCoverData]:
        return [
            MinWeightVertexCoverData(s.graph, s.weights)
            for s in self._generator.generate(n_samples)
        ]


def build_vertexcover_model(data: Union[str, MinWeightVertexCoverData]) -> GurobiModel:
    if isinstance(data, str):
        data = read_pkl_gz(data)
    assert isinstance(data, MinWeightVertexCoverData)
    model = gp.Model()
    nodes = list(data.graph.nodes)
    x = model.addVars(nodes, vtype=GRB.BINARY, name="x")
    model.setObjective(quicksum(data.weights[i] * x[i] for i in nodes))
    for (v1, v2) in data.graph.edges:
        model.addConstr(x[v1] + x[v2] >= 1)
    model.update()
    return GurobiModel(model)
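A minimal sketch; the generator simply re-labels stable-set instances, so the defaults below mirror MaxWeightStableSetGenerator's:

from miplearn.problems.vertexcover import (
    MinWeightVertexCoverGenerator,
    build_vertexcover_model,
)

# Defaults: 250-node random graphs with 5% edge probability, shared across
# instances since fix_graph=True.
data = MinWeightVertexCoverGenerator().generate(10)
model = build_vertexcover_model(data[0])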