mirror of https://github.com/ANL-CEEESA/MIPLearn.git
synced 2025-12-06 01:18:52 -06:00

Commit: Move python files to root folder; remove built docs
miplearn/problems/__init__.py (new file)
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
miplearn/problems/knapsack.py (new file)
@@ -0,0 +1,275 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn import Instance


class ChallengeA:
    """
    - 250 variables, 10 constraints, fixed weights
    - w ~ U(0, 1000), jitter ~ U(0.95, 1.05)
    - K = 500, u ~ U(0, 1)
    - alpha = 0.25
    """

    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50):
        np.random.seed(seed)
        self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
                                          m=randint(low=10, high=11),
                                          w=uniform(loc=0.0, scale=1000.0),
                                          K=uniform(loc=500.0, scale=0.0),
                                          u=uniform(loc=0.0, scale=1.0),
                                          alpha=uniform(loc=0.25, scale=0.0),
                                          fix_w=True,
                                          w_jitter=uniform(loc=0.95, scale=0.1),
                                          )
        np.random.seed(seed + 1)
        self.training_instances = self.gen.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.gen.generate(n_test_instances)


class MultiKnapsackInstance(Instance):
    """Representation of the Multidimensional 0-1 Knapsack Problem.

    Given a set of n items and m knapsacks, the problem is to find a subset of items S
    maximizing sum(prices[i] for i in S). If selected, each item i occupies weights[i,j]
    units of space in each knapsack j. Furthermore, each knapsack j has limited storage
    space, given by capacities[j].

    This implementation assigns a different category to each decision variable, and therefore
    trains one ML model per variable. It is only suitable when training and test instances
    have the same size and items do not shuffle around.
    """

    def __init__(self, prices, capacities, weights):
        assert isinstance(prices, np.ndarray)
        assert isinstance(capacities, np.ndarray)
        assert isinstance(weights, np.ndarray)
        assert len(weights.shape) == 2
        self.m, self.n = weights.shape
        assert prices.shape == (self.n,)
        assert capacities.shape == (self.m,)
        self.prices = prices
        self.capacities = capacities
        self.weights = weights

    def to_model(self):
        model = pe.ConcreteModel()
        model.x = pe.Var(range(self.n), domain=pe.Binary)
        model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
                                                        for j in range(self.n)),
                                 sense=pe.maximize)
        model.eq_capacity = pe.ConstraintList()
        for i in range(self.m):
            model.eq_capacity.add(sum(model.x[j] * self.weights[i, j]
                                      for j in range(self.n)) <= self.capacities[i])
        return model

    def get_instance_features(self):
        return np.hstack([
            np.mean(self.prices),
            self.capacities,
        ])

    def get_variable_features(self, var, index):
        return np.hstack([
            self.prices[index],
            self.weights[:, index],
        ])

    # def get_variable_category(self, var, index):
    #     return index


class MultiKnapsackGenerator:
    def __init__(self,
                 n=randint(low=100, high=101),
                 m=randint(low=30, high=31),
                 w=randint(low=0, high=1000),
                 K=randint(low=500, high=501),
                 u=uniform(loc=0.0, scale=1.0),
                 alpha=uniform(loc=0.25, scale=0.0),
                 fix_w=False,
                 w_jitter=uniform(loc=1.0, scale=0.0),
                 round=True,
                 ):
        """Initialize the problem generator.

        Instances have a random number of items (or variables) and a random number of
        knapsacks (or constraints), as specified by the provided probability distributions
        `n` and `m`, respectively. The weight of each item `i` on knapsack `j` is sampled
        independently from the provided distribution `w`. The capacity of knapsack `j` is
        set to:

            alpha_j * sum(w[i,j] for i in range(n))

        where `alpha_j`, the tightness ratio, is sampled from the provided probability
        distribution `alpha`. To make the instances more challenging, the prices of the
        items are linearly correlated with their average weights. More specifically, the
        price of each item `i` is set to:

            sum(w[i,j]/m for j in range(m)) + K * u_i

        where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are
        sampled from the provided probability distributions. Note that `K` is only sampled
        once for the entire instance.

        If fix_w=True is provided, then w[i,j] are kept the same in all generated instances.
        This also implies that n and m are kept fixed. Although the prices and capacities
        are derived from w[i,j], as long as u and K are not constants, the generated
        instances will still not be completely identical.

        If a probability distribution w_jitter is provided, then item weights will be set to
        w[i,j] * gamma[i,j], where gamma[i,j] is sampled from w_jitter. When combined with
        fix_w=True, this argument may be used to generate instances where the weight of each
        item is roughly the same, but not exactly identical, across all instances. The
        prices of the items and the capacities of the knapsacks are calculated as above, but
        using these perturbed weights instead.

        By default, all generated prices, weights and capacities are rounded to the nearest
        integer. If `round=False` is provided, this rounding is disabled.

        Parameters
        ----------
        n: rv_discrete
            Probability distribution for the number of items (or variables)
        m: rv_discrete
            Probability distribution for the number of knapsacks (or constraints)
        w: rv_continuous
            Probability distribution for the item weights
        K: rv_continuous
            Probability distribution for the profit correlation coefficient
        u: rv_continuous
            Probability distribution for the profit multiplier
        alpha: rv_continuous
            Probability distribution for the tightness ratio
        fix_w: boolean
            If true, weights are kept the same (minus the noise from w_jitter) in all
            instances
        w_jitter: rv_continuous
            Probability distribution for random noise added to the weights
        round: boolean
            If true, all prices, weights and capacities are rounded to the nearest integer
        """
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(alpha, rv_frozen), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(w_jitter, rv_frozen), \
            "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
        self.K = K
        self.u = u
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.round = round

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
            self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
            self.fix_u = self.u.rvs(self.fix_n)
            self.fix_K = self.K.rvs()
        else:
            self.fix_n = None
            self.fix_m = None
            self.fix_w = None
            self.fix_u = None
            self.fix_K = None

    def generate(self, n_samples):
        def _sample():
            if self.fix_w is not None:
                n = self.fix_n
                m = self.fix_m
                w = self.fix_w
                u = self.fix_u
                K = self.fix_K
            else:
                n = self.n.rvs()
                m = self.m.rvs()
                w = np.array([self.w.rvs(n) for _ in range(m)])
                u = self.u.rvs(n)
                K = self.K.rvs()
            # Apply multiplicative jitter to the (possibly fixed) weights.
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackInstance(p, b, w)
        return [_sample() for _ in range(n_samples)]


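[Editor's note] For reference, a minimal usage sketch of the generator above. This is not part of the committed file; the `glpk` solver name is an assumption, and any Pyomo-supported MIP solver works in its place.

import pyomo.environ as pe
from scipy.stats import randint, uniform
from miplearn.problems.knapsack import MultiKnapsackGenerator

# Generate ten small instances and solve the first one directly with Pyomo.
gen = MultiKnapsackGenerator(n=randint(low=50, high=51),
                             m=randint(low=5, high=6),
                             w=uniform(loc=0.0, scale=1000.0))
instances = gen.generate(10)
model = instances[0].to_model()
pe.SolverFactory("glpk").solve(model)  # solver choice is an assumption
print(pe.value(model.OBJ))
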
class KnapsackInstance(Instance):
    """Simpler (one-dimensional) Knapsack Problem, used for testing."""

    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.prices[v] for v in items),
                                 sense=pe.maximize)
        model.eq_capacity = pe.Constraint(expr=sum(model.x[v] * self.weights[v]
                                                   for v in items) <= self.capacity)
        return model

    def get_instance_features(self):
        return np.array([
            self.capacity,
            np.average(self.weights),
        ])

    def get_variable_features(self, var, index):
        return np.array([
            self.weights[index],
            self.prices[index],
        ])


class GurobiKnapsackInstance(KnapsackInstance):
    """Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
    instead of Pyomo, used for testing.
    """

    def __init__(self, weights, prices, capacity):
        super().__init__(weights, prices, capacity)

    def to_model(self):
        import gurobipy as gp
        from gurobipy import GRB

        model = gp.Model("Knapsack")
        n = len(self.weights)
        x = model.addVars(n, vtype=GRB.BINARY, name="x")
        model.addConstr(gp.quicksum(x[i] * self.weights[i]
                                    for i in range(n)) <= self.capacity)
        model.setObjective(gp.quicksum(x[i] * self.prices[i]
                                       for i in range(n)), GRB.MAXIMIZE)
        return model
miplearn/problems/stab.py (new file)
@@ -0,0 +1,130 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import networkx as nx
import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn import Instance


class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):
        np.random.seed(seed)
        self.generator = MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
                                                     n=randint(low=200, high=201),
                                                     p=uniform(loc=0.05, scale=0.0),
                                                     fix_graph=True)

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class MaxWeightStableSetGenerator:
    """Random instance generator for the Maximum-Weight Stable Set Problem.

    The generator has two modes of operation. When `fix_graph=True` is provided, one random
    Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are
    sampled from the user-provided probability distributions `n` and `p`. To generate each
    instance, the generator then independently samples each $w_v$ from the user-provided
    probability distribution `w`.

    When `fix_graph=False`, a new random graph is generated for each instance; the remaining
    parameters are sampled in the same way.
    """

    def __init__(self,
                 w=uniform(loc=10.0, scale=1.0),
                 n=randint(low=250, high=251),
                 p=uniform(loc=0.05, scale=0.0),
                 fix_graph=True):
        """Initialize the problem generator.

        Parameters
        ----------
        w: rv_continuous
            Probability distribution for vertex weights.
        n: rv_discrete
            Probability distribution for parameter $n$ in the Erdős-Rényi model.
        p: rv_continuous
            Probability distribution for parameter $p$ in the Erdős-Rényi model.
        fix_graph: bool
            If True, generate a single random graph in the constructor and reuse it for
            all instances; otherwise, generate a new graph for each instance.
        """
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(p, rv_frozen), "p should be a SciPy probability distribution"
        self.w = w
        self.n = n
        self.p = p
        self.fix_graph = fix_graph
        self.graph = None
        if fix_graph:
            self.graph = self._generate_graph()

    def generate(self, n_samples):
        def _sample():
            if self.graph is not None:
                graph = self.graph
            else:
                graph = self._generate_graph()
            weights = self.w.rvs(graph.number_of_nodes())
            return MaxWeightStableSetInstance(graph, weights)
        return [_sample() for _ in range(n_samples)]

    def _generate_graph(self):
        return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())


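[Editor's note] A minimal usage sketch of the two modes described above. Parameter values are illustrative and not part of the committed file.

from scipy.stats import randint, uniform
from miplearn.problems.stab import MaxWeightStableSetGenerator

# fix_graph=True: one Erdős-Rényi graph, generated once, shared by all instances;
# only the vertex weights are resampled.
fixed = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                    n=randint(low=20, high=21),
                                    p=uniform(loc=0.1, scale=0.0),
                                    fix_graph=True).generate(5)
assert all(inst.graph is fixed[0].graph for inst in fixed)

# fix_graph=False: a fresh graph is generated for every instance.
varied = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                     n=randint(low=20, high=31),
                                     p=uniform(loc=0.1, scale=0.0),
                                     fix_graph=False).generate(5)
assert varied[0].graph is not varied[1].graph
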
class MaxWeightStableSetInstance(Instance):
    """An instance of the Maximum-Weight Stable Set Problem.

    Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
    set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
    a subset of vertices, no two of which are adjacent.

    This is one of Karp's 21 NP-complete problems.
    """

    def __init__(self, graph, weights):
        self.graph = graph
        self.weights = weights

    def to_model(self):
        nodes = list(self.graph.nodes)
        model = pe.ConcreteModel()
        model.x = pe.Var(nodes, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[v] * self.weights[v] for v in nodes),
                                 sense=pe.maximize)
        # One constraint per maximal clique: at most one vertex of each clique may be
        # selected. This is stronger than the usual one-constraint-per-edge formulation.
        model.clique_eqs = pe.ConstraintList()
        for clique in nx.find_cliques(self.graph):
            model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
        return model

    def get_instance_features(self):
        return np.ones(0)

    def get_variable_features(self, var, index):
        # Pad with sentinel values so that vertices with fewer than five neighbors still
        # produce a fixed-length feature vector.
        neighbor_weights = [0] * 15
        neighbor_degrees = [100] * 15
        for n in self.graph.neighbors(index):
            neighbor_weights += [self.weights[n] / self.weights[index]]
            neighbor_degrees += [self.graph.degree(n) / self.graph.degree(index)]
        neighbor_weights.sort(reverse=True)
        neighbor_degrees.sort()
        features = []
        features += neighbor_weights[:5]
        features += neighbor_degrees[:5]
        features += [self.graph.degree(index)]
        return np.array(features)

    def get_variable_category(self, var, index):
        return "default"
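[Editor's note] To illustrate the formulation above, a sketch that builds and solves a small instance directly with Pyomo. Not part of the committed file; the `glpk` solver is an assumption, and any Pyomo-supported MIP solver works.

import networkx as nx
import pyomo.environ as pe
from miplearn.problems.stab import MaxWeightStableSetInstance

# Maximum-weight stable set of a 5-cycle with unit weights.
instance = MaxWeightStableSetInstance(nx.cycle_graph(5), [1., 1., 1., 1., 1.])
model = instance.to_model()
pe.SolverFactory("glpk").solve(model)  # solver choice is an assumption
# A 5-cycle admits at most two pairwise non-adjacent vertices.
assert pe.value(model.OBJ) == 2.0
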
miplearn/problems/tests/__init__.py (new file)
@@ -0,0 +1,4 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
miplearn/problems/tests/test_knapsack.py (new file)
@@ -0,0 +1,25 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from scipy.stats import uniform, randint

from miplearn.problems.knapsack import MultiKnapsackGenerator


def test_knapsack_generator():
    gen = MultiKnapsackGenerator(n=randint(low=100, high=101),
                                 m=randint(low=30, high=31),
                                 w=randint(low=0, high=1000),
                                 K=randint(low=500, high=501),
                                 u=uniform(loc=1.0, scale=1.0),
                                 alpha=uniform(loc=0.50, scale=0.0),
                                 )
    instances = gen.generate(100)
    w_avg = sum(instance.weights for instance in instances) / len(instances)
    p_avg = sum(instance.prices for instance in instances) / len(instances)
    b_avg = sum(instance.capacities for instance in instances) / len(instances)
    # w ~ U(0, 1000), so the average item weight should be close to 500.
    assert round(np.mean(w_avg), -1) == 500.
    # Expected price: average weight (500) + K * E[u] = 500 + 500 * 1.5; the assertion
    # below was flaky and is left disabled.
    # assert round(np.mean(p_avg), -1) == 1200.  # flaky
    # Expected capacity: alpha * n * E[w] = 0.5 * 100 * 500 = 25000.
    assert round(np.mean(b_avg), -3) == 25000.
miplearn/problems/tests/test_stab.py (new file)
@@ -0,0 +1,46 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import networkx as nx
import numpy as np
from scipy.stats import uniform, randint

from miplearn import LearningSolver
from miplearn.problems.stab import (MaxWeightStableSetGenerator,
                                    MaxWeightStableSetInstance)


def test_stab():
    graph = nx.cycle_graph(5)
    weights = [1., 1., 1., 1., 1.]
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    solver.solve(instance)
    # The maximum stable set of a 5-cycle has exactly two vertices.
    assert instance.lower_bound == 2.


def test_stab_generator_fixed_graph():
    np.random.seed(42)
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=10, high=11),
                                      p=uniform(loc=0.05, scale=0.),
                                      fix_graph=True)
    instances = gen.generate(1_000)
    weights = np.array([instance.weights for instance in instances])
    weights_avg_actual = np.round(np.average(weights, axis=0))
    # w ~ U(50, 60), so each vertex weight should average 55.
    weights_avg_expected = [55.0] * 10
    assert list(weights_avg_actual) == weights_avg_expected


def test_stab_generator_random_graph():
    np.random.seed(42)
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=30, high=41),
                                      p=uniform(loc=0.5, scale=0.),
                                      fix_graph=False)
    instances = gen.generate(1_000)
    n_nodes = [instance.graph.number_of_nodes() for instance in instances]
    n_edges = [instance.graph.number_of_edges() for instance in instances]
    # n is uniform over {30, ..., 40}, with mean 35; the expected edge count is
    # p * n * (n - 1) / 2, which is roughly 300 for these parameters.
    assert np.round(np.mean(n_nodes)) == 35.
    assert np.round(np.mean(n_edges), -1) == 300.
miplearn/problems/tests/test_tsp.py (new file)
@@ -0,0 +1,74 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint

from miplearn import LearningSolver
from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance


def test_generator():
    instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                           y=uniform(loc=0.0, scale=1000.0),
                                           n=randint(low=100, high=101),
                                           gamma=uniform(loc=0.95, scale=0.1),
                                           fix_cities=True).generate(100)
    assert len(instances) == 100
    assert instances[0].n_cities == 100
    # Distance matrices must be symmetric.
    assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
    # Even with fixed cities, the gamma jitter should vary distances across instances.
    d = [instance.distances[0, 1] for instance in instances]
    assert np.std(d) > 0


def test_instance():
    n_cities = 4
    distances = np.array([
        [0., 1., 2., 1.],
        [1., 0., 1., 2.],
        [2., 1., 0., 1.],
        [1., 2., 1., 0.],
    ])
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi', 'cplex']:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
        assert x[0, 1] == 1.0
        assert x[0, 2] == 0.0
        assert x[0, 3] == 1.0
        assert x[1, 2] == 1.0
        assert x[1, 3] == 0.0
        assert x[2, 3] == 1.0
        assert instance.lower_bound == 4.0
        assert instance.upper_bound == 4.0


def test_subtour():
    n_cities = 6
    cities = np.array([
        [0., 0.],
        [1., 0.],
        [2., 0.],
        [3., 0.],
        [0., 1.],
        [3., 1.],
    ])
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi', 'cplex']:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        assert hasattr(instance, "found_violated_lazy_constraints")
        assert hasattr(instance, "found_violated_user_cuts")
        x = instance.solution["x"]
        assert x[0, 1] == 1.0
        assert x[0, 4] == 1.0
        assert x[1, 2] == 1.0
        assert x[2, 3] == 1.0
        assert x[3, 5] == 1.0
        assert x[4, 5] == 1.0
        solver.fit([instance])
        solver.solve(instance)
miplearn/problems/tsp.py (new file)
@@ -0,0 +1,175 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import networkx as nx
import numpy as np
import pyomo.environ as pe
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen

from miplearn import Instance


class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):
        np.random.seed(seed)
        self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                                    y=uniform(loc=0.0, scale=1000.0),
                                                    n=randint(low=350, high=351),
                                                    gamma=uniform(loc=0.95, scale=0.1),
                                                    fix_cities=True,
                                                    round=True,
                                                    )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class TravelingSalesmanGenerator:
    """Random instance generator for the Traveling Salesman Problem."""

    def __init__(self,
                 x=uniform(loc=0.0, scale=1000.0),
                 y=uniform(loc=0.0, scale=1000.0),
                 n=randint(low=100, high=101),
                 gamma=uniform(loc=1.0, scale=0.0),
                 fix_cities=True,
                 round=True,
                 ):
        """Initialize the problem generator.

        Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n), where n, x_i and
        y_i are sampled independently from the provided probability distributions `n`, `x`
        and `y`. For each (unordered) pair of cities (i,j), the distance d[i,j] between them
        is set to:

            d[i,j] = gamma[i,j] * sqrt((x_i - x_j)^2 + (y_i - y_j)^2)

        where gamma[i,j] is sampled from the provided probability distribution `gamma`.

        If fix_cities=True, the list of cities is kept the same for all generated instances.
        The gamma values, and therefore also the distances, still vary across instances.

        By default, all distances d[i,j] are rounded to the nearest integer. If `round=False`
        is provided, this rounding is disabled.

        Arguments
        ---------
        x: rv_continuous
            Probability distribution for the x-coordinate of each city.
        y: rv_continuous
            Probability distribution for the y-coordinate of each city.
        n: rv_discrete
            Probability distribution for the number of cities.
        gamma: rv_continuous
            Probability distribution for the multiplicative noise applied to each distance.
        fix_cities: bool
            If False, cities will be resampled for every generated instance. Otherwise, the
            list of cities will be computed once, in the constructor.
        round: bool
            If True, distances are rounded to the nearest integer.
        """
        assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
        assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution"
        self.x = x
        self.y = y
        self.n = n
        self.gamma = gamma
        self.round = round

        if fix_cities:
            self.fixed_n, self.fixed_cities = self._generate_cities()
        else:
            self.fixed_n = None
            self.fixed_cities = None

    def generate(self, n_samples):
        def _sample():
            if self.fixed_cities is not None:
                n, cities = self.fixed_n, self.fixed_cities
            else:
                n, cities = self._generate_cities()
            distances = squareform(pdist(cities)) * self.gamma.rvs(size=(n, n))
            # Symmetrize: keep the lower triangle and mirror it above the diagonal.
            distances = np.tril(distances) + np.triu(distances.T, 1)
            if self.round:
                distances = distances.round()
            return TravelingSalesmanInstance(n, distances)
        return [_sample() for _ in range(n_samples)]

    def _generate_cities(self):
        n = self.n.rvs()
        cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
        return n, cities


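[Editor's note] A sketch verifying the distance formula above, not part of the committed file: with gamma fixed to 1 and round=False, d[i,j] should equal the plain Euclidean distance.

import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.stats import randint, uniform
from miplearn.problems.tsp import TravelingSalesmanGenerator

gen = TravelingSalesmanGenerator(n=randint(low=10, high=11),
                                 gamma=uniform(loc=1.0, scale=0.0),  # constant 1
                                 fix_cities=True,
                                 round=False)
instance = gen.generate(1)[0]
expected = squareform(pdist(gen.fixed_cities))
assert np.allclose(instance.distances, expected)
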
class TravelingSalesmanInstance(Instance):
    """An instance of the Traveling Salesman Problem.

    Given a list of cities and the distance between each pair of cities, the problem asks
    for the shortest route starting at the first city, visiting each other city exactly
    once, then returning to the first city. This problem is a generalization of the
    Hamiltonian cycle problem, one of Karp's 21 NP-complete problems.
    """

    def __init__(self, n_cities, distances):
        assert isinstance(distances, np.ndarray)
        assert distances.shape == (n_cities, n_cities)
        self.n_cities = n_cities
        self.distances = distances

    def to_model(self):
        model = pe.ConcreteModel()
        model.edges = edges = [(i, j)
                               for i in range(self.n_cities)
                               for j in range(i + 1, self.n_cities)]
        model.x = pe.Var(edges, domain=pe.Binary)
        model.obj = pe.Objective(expr=sum(model.x[i, j] * self.distances[i, j]
                                          for (i, j) in edges),
                                 sense=pe.minimize)
        model.eq_degree = pe.ConstraintList()
        model.eq_subtour = pe.ConstraintList()
        # Degree constraints: exactly two selected edges touch each city.
        for i in range(self.n_cities):
            model.eq_degree.add(sum(model.x[min(i, j), max(i, j)]
                                    for j in range(self.n_cities) if i != j) == 2)
        return model

    def get_instance_features(self):
        return np.array([1])

    def get_variable_features(self, var_name, index):
        return np.array([1])

    def get_variable_category(self, var_name, index):
        return index

    def find_violated_lazy_constraints(self, model):
        # Any connected component of the selected edges that does not span all cities
        # is a subtour, and therefore a violation.
        selected_edges = [e for e in model.edges if model.x[e].value > 0.5]
        graph = nx.Graph()
        graph.add_edges_from(selected_edges)
        components = [frozenset(c) for c in list(nx.connected_components(graph))]
        violations = []
        for c in components:
            if len(c) < self.n_cities:
                violations += [c]
        return violations

    def build_lazy_constraint(self, model, component):
        # Subtour elimination: at least two selected edges must cross the cut separating
        # this component from the remaining cities.
        cut_edges = [e for e in model.edges
                     if (e[0] in component and e[1] not in component) or
                        (e[0] not in component and e[1] in component)]
        return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)

    def find_violated_user_cuts(self, model):
        return self.find_violated_lazy_constraints(model)

    def build_user_cut(self, model, violation):
        return self.build_lazy_constraint(model, violation)
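[Editor's note] To illustrate how the callbacks above fit together, a hedged sketch of a manual cutting-plane loop. This is not how LearningSolver invokes them internally (that code is not part of this diff), and the `glpk` solver name is an assumption.

import numpy as np
import pyomo.environ as pe
from miplearn.problems.tsp import TravelingSalesmanInstance

distances = np.array([[0., 1., 2., 1.],
                      [1., 0., 1., 2.],
                      [2., 1., 0., 1.],
                      [1., 2., 1., 0.]])
instance = TravelingSalesmanInstance(4, distances)
model = instance.to_model()
solver = pe.SolverFactory("glpk")  # solver choice is an assumption
# Re-solve and add subtour-elimination cuts until the selected edges form one tour.
while True:
    solver.solve(model)
    violations = instance.find_violated_lazy_constraints(model)
    if not violations:
        break
    for component in violations:
        instance.build_lazy_constraint(model, component)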