<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.knapsack API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
<link href='https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/8.0.0/sanitize.min.css' rel='stylesheet'>
<link href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/github.min.css" rel="stylesheet">
<style>.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{font-weight:bold}#index h4 + ul{margin-bottom:.6em}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
</head>
<body>
<main>
<article id="content">
<header>
<h1 class="title">Module <code>miplearn.problems.knapsack</code></h1>
</header>
<section id="section-intro">
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python"># MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
from miplearn.instance import Instance
class ChallengeA:
&#34;&#34;&#34;
- 250 variables, 10 constraints, fixed weights
- w ~ U(0, 1000), jitter ~ U(0.95, 1.05)
- K = 500, u ~ U(0., 1.)
- alpha = 0.25
&#34;&#34;&#34;
def __init__(
self,
seed=42,
n_training_instances=500,
n_test_instances=50,
):
np.random.seed(seed)
self.gen = MultiKnapsackGenerator(
n=randint(low=250, high=251),
m=randint(low=10, high=11),
w=uniform(loc=0.0, scale=1000.0),
K=uniform(loc=500.0, scale=0.0),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=True,
w_jitter=uniform(loc=0.95, scale=0.1),
)
np.random.seed(seed + 1)
self.training_instances = self.gen.generate(n_training_instances)
np.random.seed(seed + 2)
self.test_instances = self.gen.generate(n_test_instances)
class MultiKnapsackInstance(Instance):
&#34;&#34;&#34;Representation of the Multidimensional 0-1 Knapsack Problem.
Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
sum(prices[i] for i in S). If selected, each item i occupies weights[j,i] units of space in
each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].
This implementation assigns a different category for each decision variable, and therefore
trains one ML model per variable. It is only suitable when training and test instances have
the same size and items don&#39;t shuffle around.
&#34;&#34;&#34;
def __init__(self, prices, capacities, weights):
super().__init__()
assert isinstance(prices, np.ndarray)
assert isinstance(capacities, np.ndarray)
assert isinstance(weights, np.ndarray)
assert len(weights.shape) == 2
self.m, self.n = weights.shape
assert prices.shape == (self.n,)
assert capacities.shape == (self.m,)
self.prices = prices
self.capacities = capacities
self.weights = weights
def to_model(self):
model = pe.ConcreteModel()
model.x = pe.Var(range(self.n), domain=pe.Binary)
model.OBJ = pe.Objective(
rule=lambda model: sum(model.x[j] * self.prices[j] for j in range(self.n)),
sense=pe.maximize,
)
model.eq_capacity = pe.ConstraintList()
for i in range(self.m):
model.eq_capacity.add(
sum(model.x[j] * self.weights[i, j] for j in range(self.n))
&lt;= self.capacities[i]
)
return model
def get_instance_features(self):
return np.hstack(
[
np.mean(self.prices),
self.capacities,
]
)
def get_variable_features(self, var, index):
return np.hstack(
[
self.prices[index],
self.weights[:, index],
]
)
# def get_variable_category(self, var, index):
# return index
class MultiKnapsackGenerator:
def __init__(
self,
n=randint(low=100, high=101),
m=randint(low=30, high=31),
w=randint(low=0, high=1000),
K=randint(low=500, high=501),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=False,
w_jitter=uniform(loc=1.0, scale=0.0),
round=True,
):
&#34;&#34;&#34;Initialize the problem generator.
Instances have a random number of items (or variables) and a random number of knapsacks
(or constraints), as specified by the provided probability distributions `n` and `m`,
respectively. The weight of each item `i` on knapsack `j` is sampled independently from
the provided distribution `w`. The capacity of knapsack `j` is set to:
alpha_j * sum(w[i,j] for i in range(n)),
where `alpha_j`, the tightness ratio, is sampled from the provided probability
distribution `alpha`. To make the instances more challenging, the costs of the items
are linearly correlated to their average weights. More specifically, the price of each
item `i` is set to:
sum(w[i,j]/m for j in range(m)) + K * u_i,
where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
from the provided probability distributions. Note that `K` is only sampled once for the
entire instance.
If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
also implies that n and m are kept fixed. Although the prices and capacities are derived
from w[i,j], as long as u and K are not constants, the generated instances will still not
be completely identical.
If a probability distribution w_jitter is provided, then item weights will be set to
w[i,j] * gamma[i,j] where gamma[i,j] is sampled from w_jitter. When combined with
fix_w=True, this argument may be used to generate instances where the weight of each item
is roughly the same, but not exactly identical, across all instances. The prices of the
items and the capacities of the knapsacks will be calculated as above, but using these
perturbed weights instead.
By default, all generated prices, weights and capacities are rounded to the nearest
integer. If `round=False` is provided, this rounding will be disabled.
Parameters
----------
n: rv_discrete
Probability distribution for the number of items (or variables)
m: rv_discrete
Probability distribution for the number of knapsacks (or constraints)
w: rv_continuous
Probability distribution for the item weights
K: rv_continuous
Probability distribution for the profit correlation coefficient
u: rv_continuous
Probability distribution for the profit multiplier
alpha: rv_continuous
Probability distribution for the tightness ratio
fix_w: boolean
If true, weights are kept the same (minus the noise from w_jitter) in all instances
w_jitter: rv_continuous
Probability distribution for random noise added to the weights
round: boolean
If true, all prices, weights and capacities are rounded to the nearest integer
&#34;&#34;&#34;
assert isinstance(n, rv_frozen), &#34;n should be a SciPy probability distribution&#34;
assert isinstance(m, rv_frozen), &#34;m should be a SciPy probability distribution&#34;
assert isinstance(w, rv_frozen), &#34;w should be a SciPy probability distribution&#34;
assert isinstance(K, rv_frozen), &#34;K should be a SciPy probability distribution&#34;
assert isinstance(u, rv_frozen), &#34;u should be a SciPy probability distribution&#34;
assert isinstance(
alpha, rv_frozen
), &#34;alpha should be a SciPy probability distribution&#34;
assert isinstance(fix_w, bool), &#34;fix_w should be boolean&#34;
assert isinstance(
w_jitter, rv_frozen
), &#34;w_jitter should be a SciPy probability distribution&#34;
self.n = n
self.m = m
self.w = w
self.K = K
self.u = u
self.alpha = alpha
self.w_jitter = w_jitter
self.round = round
if fix_w:
self.fix_n = self.n.rvs()
self.fix_m = self.m.rvs()
self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
self.fix_u = self.u.rvs(self.fix_n)
self.fix_K = self.K.rvs()
else:
self.fix_n = None
self.fix_m = None
self.fix_w = None
self.fix_u = None
self.fix_K = None
def generate(self, n_samples):
def _sample():
if self.fix_w is not None:
n = self.fix_n
m = self.fix_m
w = self.fix_w
u = self.fix_u
K = self.fix_K
else:
n = self.n.rvs()
m = self.m.rvs()
w = np.array([self.w.rvs(n) for _ in range(m)])
u = self.u.rvs(n)
K = self.K.rvs()
w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
alpha = self.alpha.rvs(m)
p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
if self.round:
p = p.round()
b = b.round()
w = w.round()
return MultiKnapsackInstance(p, b, w)
return [_sample() for _ in range(n_samples)]
class KnapsackInstance(Instance):
&#34;&#34;&#34;
Simpler (one-dimensional) Knapsack Problem, used for testing.
&#34;&#34;&#34;
def __init__(self, weights, prices, capacity):
super().__init__()
self.weights = weights
self.prices = prices
self.capacity = capacity
def to_model(self):
model = pe.ConcreteModel()
items = range(len(self.weights))
model.x = pe.Var(items, domain=pe.Binary)
model.OBJ = pe.Objective(
expr=sum(model.x[v] * self.prices[v] for v in items), sense=pe.maximize
)
model.eq_capacity = pe.Constraint(
expr=sum(model.x[v] * self.weights[v] for v in items) &lt;= self.capacity
)
return model
def get_instance_features(self):
return np.array(
[
self.capacity,
np.average(self.weights),
]
)
def get_variable_features(self, var, index):
return np.array(
[
self.weights[index],
self.prices[index],
]
)
class GurobiKnapsackInstance(KnapsackInstance):
&#34;&#34;&#34;
Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
instead of Pyomo, used for testing.
&#34;&#34;&#34;
def __init__(self, weights, prices, capacity):
super().__init__(weights, prices, capacity)
def to_model(self):
import gurobipy as gp
from gurobipy import GRB
model = gp.Model(&#34;Knapsack&#34;)
n = len(self.weights)
x = model.addVars(n, vtype=GRB.BINARY, name=&#34;x&#34;)
model.addConstr(
gp.quicksum(x[i] * self.weights[i] for i in range(n)) &lt;= self.capacity,
&#34;eq_capacity&#34;,
)
model.setObjective(
gp.quicksum(x[i] * self.prices[i] for i in range(n)), GRB.MAXIMIZE
)
return model</code></pre>
</details>
</section>
<section>
</section>
<section>
</section>
<section>
</section>
<section>
<h2 class="section-title" id="header-classes">Classes</h2>
<dl>
<dt id="miplearn.problems.knapsack.ChallengeA"><code class="flex name class">
<span>class <span class="ident">ChallengeA</span></span>
<span>(</span><span>seed=42, n_training_instances=500, n_test_instances=50)</span>
</code></dt>
<dd>
<section class="desc"><ul>
<li>250 variables, 10 constraints, fixed weights</li>
<li>w ~ U(0, 1000), jitter ~ U(0.95, 1.05)</li>
<li>K = 500, u ~ U(0., 1.)</li>
<li>alpha = 0.25</li>
</ul></section>
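<p>A minimal usage sketch, assuming <code>miplearn</code> and its dependencies are installed; it relies only on the constructor and attributes shown in the source below, with default sizes:</p>
<pre><code class="python">from miplearn.problems.knapsack import ChallengeA

# Build the benchmark with its default seed and instance counts.
challenge = ChallengeA()

# Both lists contain MultiKnapsackInstance objects.
print(len(challenge.training_instances))  # 500 by default
print(len(challenge.test_instances))      # 50 by default

# Each instance can be turned into a Pyomo model.
model = challenge.training_instances[0].to_model()</code></pre>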
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class ChallengeA:
&#34;&#34;&#34;
- 250 variables, 10 constraints, fixed weights
- w ~ U(0, 1000), jitter ~ U(0.95, 1.05)
- K = 500, u ~ U(0., 1.)
- alpha = 0.25
&#34;&#34;&#34;
def __init__(
self,
seed=42,
n_training_instances=500,
n_test_instances=50,
):
np.random.seed(seed)
self.gen = MultiKnapsackGenerator(
n=randint(low=250, high=251),
m=randint(low=10, high=11),
w=uniform(loc=0.0, scale=1000.0),
K=uniform(loc=500.0, scale=0.0),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=True,
w_jitter=uniform(loc=0.95, scale=0.1),
)
np.random.seed(seed + 1)
self.training_instances = self.gen.generate(n_training_instances)
np.random.seed(seed + 2)
self.test_instances = self.gen.generate(n_test_instances)</code></pre>
</details>
</dd>
<dt id="miplearn.problems.knapsack.GurobiKnapsackInstance"><code class="flex name class">
<span>class <span class="ident">GurobiKnapsackInstance</span></span>
<span>(</span><span>weights, prices, capacity)</span>
</code></dt>
<dd>
<section class="desc"><p>Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
instead of Pyomo, used for testing.</p></section>
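<p>A minimal sketch, assuming <code>gurobipy</code> and a Gurobi license are available; the item data below is purely illustrative:</p>
<pre><code class="python">from miplearn.problems.knapsack import GurobiKnapsackInstance

instance = GurobiKnapsackInstance(
    weights=[23.0, 26.0, 20.0, 18.0],
    prices=[505.0, 352.0, 458.0, 220.0],
    capacity=67.0,
)

# to_model() returns a gurobipy.Model with binary variables x[0], ..., x[n-1].
model = instance.to_model()
model.optimize()
print(model.objVal)</code></pre>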
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class GurobiKnapsackInstance(KnapsackInstance):
&#34;&#34;&#34;
Simpler (one-dimensional) knapsack instance, implemented directly in Gurobi
instead of Pyomo, used for testing.
&#34;&#34;&#34;
def __init__(self, weights, prices, capacity):
super().__init__(weights, prices, capacity)
def to_model(self):
import gurobipy as gp
from gurobipy import GRB
model = gp.Model(&#34;Knapsack&#34;)
n = len(self.weights)
x = model.addVars(n, vtype=GRB.BINARY, name=&#34;x&#34;)
model.addConstr(
gp.quicksum(x[i] * self.weights[i] for i in range(n)) &lt;= self.capacity,
&#34;eq_capacity&#34;,
)
model.setObjective(
gp.quicksum(x[i] * self.prices[i] for i in range(n)), GRB.MAXIMIZE
)
return model</code></pre>
</details>
<h3>Ancestors</h3>
<ul class="hlist">
<li><a title="miplearn.problems.knapsack.KnapsackInstance" href="#miplearn.problems.knapsack.KnapsackInstance">KnapsackInstance</a></li>
<li><a title="miplearn.instance.Instance" href="../instance.html#miplearn.instance.Instance">Instance</a></li>
<li>abc.ABC</li>
</ul>
<h3>Inherited members</h3>
<ul class="hlist">
<li><code><b><a title="miplearn.problems.knapsack.KnapsackInstance" href="#miplearn.problems.knapsack.KnapsackInstance">KnapsackInstance</a></b></code>:
<ul class="hlist">
<li><code><a title="miplearn.problems.knapsack.KnapsackInstance.build_lazy_constraint" href="../instance.html#miplearn.instance.Instance.build_lazy_constraint">build_lazy_constraint</a></code></li>
<li><code><a title="miplearn.problems.knapsack.KnapsackInstance.find_violated_lazy_constraints" href="../instance.html#miplearn.instance.Instance.find_violated_lazy_constraints">find_violated_lazy_constraints</a></code></li>
<li><code><a title="miplearn.problems.knapsack.KnapsackInstance.get_instance_features" href="../instance.html#miplearn.instance.Instance.get_instance_features">get_instance_features</a></code></li>
<li><code><a title="miplearn.problems.knapsack.KnapsackInstance.get_variable_category" href="../instance.html#miplearn.instance.Instance.get_variable_category">get_variable_category</a></code></li>
<li><code><a title="miplearn.problems.knapsack.KnapsackInstance.get_variable_features" href="../instance.html#miplearn.instance.Instance.get_variable_features">get_variable_features</a></code></li>
<li><code><a title="miplearn.problems.knapsack.KnapsackInstance.to_model" href="../instance.html#miplearn.instance.Instance.to_model">to_model</a></code></li>
</ul>
</li>
</ul>
</dd>
<dt id="miplearn.problems.knapsack.KnapsackInstance"><code class="flex name class">
<span>class <span class="ident">KnapsackInstance</span></span>
<span>(</span><span>weights, prices, capacity)</span>
</code></dt>
<dd>
<section class="desc"><p>Simpler (one-dimensional) Knapsack Problem, used for testing.</p></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class KnapsackInstance(Instance):
&#34;&#34;&#34;
Simpler (one-dimensional) Knapsack Problem, used for testing.
&#34;&#34;&#34;
def __init__(self, weights, prices, capacity):
super().__init__()
self.weights = weights
self.prices = prices
self.capacity = capacity
def to_model(self):
model = pe.ConcreteModel()
items = range(len(self.weights))
model.x = pe.Var(items, domain=pe.Binary)
model.OBJ = pe.Objective(
expr=sum(model.x[v] * self.prices[v] for v in items), sense=pe.maximize
)
model.eq_capacity = pe.Constraint(
expr=sum(model.x[v] * self.weights[v] for v in items) &lt;= self.capacity
)
return model
def get_instance_features(self):
return np.array(
[
self.capacity,
np.average(self.weights),
]
)
def get_variable_features(self, var, index):
return np.array(
[
self.weights[index],
self.prices[index],
]
)</code></pre>
</details>
<h3>Ancestors</h3>
<ul class="hlist">
<li><a title="miplearn.instance.Instance" href="../instance.html#miplearn.instance.Instance">Instance</a></li>
<li>abc.ABC</li>
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.problems.knapsack.GurobiKnapsackInstance" href="#miplearn.problems.knapsack.GurobiKnapsackInstance">GurobiKnapsackInstance</a></li>
</ul>
<h3>Inherited members</h3>
<ul class="hlist">
<li><code><b><a title="miplearn.instance.Instance" href="../instance.html#miplearn.instance.Instance">Instance</a></b></code>:
<ul class="hlist">
<li><code><a title="miplearn.instance.Instance.build_lazy_constraint" href="../instance.html#miplearn.instance.Instance.build_lazy_constraint">build_lazy_constraint</a></code></li>
<li><code><a title="miplearn.instance.Instance.find_violated_lazy_constraints" href="../instance.html#miplearn.instance.Instance.find_violated_lazy_constraints">find_violated_lazy_constraints</a></code></li>
<li><code><a title="miplearn.instance.Instance.get_instance_features" href="../instance.html#miplearn.instance.Instance.get_instance_features">get_instance_features</a></code></li>
<li><code><a title="miplearn.instance.Instance.get_variable_category" href="../instance.html#miplearn.instance.Instance.get_variable_category">get_variable_category</a></code></li>
<li><code><a title="miplearn.instance.Instance.get_variable_features" href="../instance.html#miplearn.instance.Instance.get_variable_features">get_variable_features</a></code></li>
<li><code><a title="miplearn.instance.Instance.to_model" href="../instance.html#miplearn.instance.Instance.to_model">to_model</a></code></li>
</ul>
</li>
</ul>
</dd>
<dt id="miplearn.problems.knapsack.MultiKnapsackGenerator"><code class="flex name class">
<span>class <span class="ident">MultiKnapsackGenerator</span></span>
<span>(</span><span>n=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, m=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, w=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, K=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, u=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, alpha=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, fix_w=False, w_jitter=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, round=True)</span>
</code></dt>
<dd>
<section class="desc"><p>Initialize the problem generator.</p>
<p>Instances have a random number of items (or variables) and a random number of knapsacks
(or constraints), as specified by the provided probability distributions <code>n</code> and <code>m</code>,
respectively. The weight of each item <code>i</code> on knapsack <code>j</code> is sampled independently from
the provided distribution <code>w</code>. The capacity of knapsack <code>j</code> is set to:</p>
<pre><code>alpha_j * sum(w[i,j] for i in range(n)),
</code></pre>
<p>where <code>alpha_j</code>, the tightness ratio, is sampled from the provided probability
distribution <code>alpha</code>. To make the instances more challenging, the costs of the items
are linearly correlated to their average weights. More specifically, the price of each
item <code>i</code> is set to:</p>
<pre><code>sum(w[i,j]/m for j in range(m)) + K * u_i,
</code></pre>
<p>where <code>K</code>, the correlation coefficient, and <code>u_i</code>, the correlation multiplier, are sampled
from the provided probability distributions. Note that <code>K</code> is only sampled once for the
entire instance.</p>
<p>If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
also implies that n and m are kept fixed. Although the prices and capacities are derived
from w[i,j], as long as u and K are not constants, the generated instances will still not
be completely identical.</p>
<p>If a probability distribution w_jitter is provided, then item weights will be set to
w[i,j] * gamma[i,j] where gamma[i,j] is sampled from w_jitter. When combined with
fix_w=True, this argument may be used to generate instances where the weight of each item
is roughly the same, but not exactly identical, across all instances. The prices of the
items and the capacities of the knapsacks will be calculated as above, but using these
perturbed weights instead.</p>
<p>By default, all generated prices, weights and capacities are rounded to the nearest
integer. If <code>round=False</code> is provided, this rounding will be disabled.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>n</code></strong> :&ensp;<code>rv_discrete</code></dt>
<dd>Probability distribution for the number of items (or variables)</dd>
<dt><strong><code>m</code></strong> :&ensp;<code>rv_discrete</code></dt>
<dd>Probability distribution for the number of knapsacks (or constraints)</dd>
<dt><strong><code>w</code></strong> :&ensp;<code>rv_continuous</code></dt>
<dd>Probability distribution for the item weights</dd>
<dt><strong><code>K</code></strong> :&ensp;<code>rv_continuous</code></dt>
<dd>Probability distribution for the profit correlation coefficient</dd>
<dt><strong><code>u</code></strong> :&ensp;<code>rv_continuous</code></dt>
<dd>Probability distribution for the profit multiplier</dd>
<dt><strong><code>alpha</code></strong> :&ensp;<code>rv_continuous</code></dt>
<dd>Probability distribution for the tightness ratio</dd>
<dt><strong><code>fix_w</code></strong> :&ensp;<code>boolean</code></dt>
<dd>If true, weights are kept the same (minus the noise from w_jitter) in all instances</dd>
<dt><strong><code>w_jitter</code></strong> :&ensp;<code>rv_continuous</code></dt>
<dd>Probability distribution for random noise added to the weights</dd>
<dt><strong><code>round</code></strong> :&ensp;<code>boolean</code></dt>
<dd>If true, all prices, weights and capacities are rounded to the nearest integer</dd>
</dl></section>
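<p>A configuration sketch; the distributions below are illustrative choices matching the documented parameters, not the defaults:</p>
<pre><code class="python">from scipy.stats import randint, uniform
from miplearn.problems.knapsack import MultiKnapsackGenerator

# 100 items, 5 knapsacks, weights in U(0, 1000), tightness ratio fixed at 0.25.
gen = MultiKnapsackGenerator(
    n=randint(low=100, high=101),
    m=randint(low=5, high=6),
    w=uniform(loc=0.0, scale=1000.0),
    K=uniform(loc=500.0, scale=0.0),
    u=uniform(loc=0.0, scale=1.0),
    alpha=uniform(loc=0.25, scale=0.0),
    fix_w=True,
    w_jitter=uniform(loc=0.95, scale=0.1),
)
instances = gen.generate(10)  # list of 10 MultiKnapsackInstance objects</code></pre>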
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class MultiKnapsackGenerator:
def __init__(
self,
n=randint(low=100, high=101),
m=randint(low=30, high=31),
w=randint(low=0, high=1000),
K=randint(low=500, high=501),
u=uniform(loc=0.0, scale=1.0),
alpha=uniform(loc=0.25, scale=0.0),
fix_w=False,
w_jitter=uniform(loc=1.0, scale=0.0),
round=True,
):
&#34;&#34;&#34;Initialize the problem generator.
Instances have a random number of items (or variables) and a random number of knapsacks
(or constraints), as specified by the provided probability distributions `n` and `m`,
respectively. The weight of each item `i` on knapsack `j` is sampled independently from
the provided distribution `w`. The capacity of knapsack `j` is set to:
alpha_j * sum(w[i,j] for i in range(n)),
where `alpha_j`, the tightness ratio, is sampled from the provided probability
distribution `alpha`. To make the instances more challenging, the costs of the items
are linearly correlated to their average weights. More specifically, the price of each
item `i` is set to:
sum(w[i,j]/m for j in range(m)) + K * u_i,
where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
from the provided probability distributions. Note that `K` is only sampled once for the
entire instance.
If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
also implies that n and m are kept fixed. Although the prices and capacities are derived
from w[i,j], as long as u and K are not constants, the generated instances will still not
be completely identical.
If a probability distribution w_jitter is provided, then item weights will be set to
w[i,j] * gamma[i,j] where gamma[i,j] is sampled from w_jitter. When combined with
fix_w=True, this argument may be used to generate instances where the weight of each item
is roughly the same, but not exactly identical, across all instances. The prices of the
items and the capacities of the knapsacks will be calculated as above, but using these
perturbed weights instead.
By default, all generated prices, weights and capacities are rounded to the nearest
integer. If `round=False` is provided, this rounding will be disabled.
Parameters
----------
n: rv_discrete
Probability distribution for the number of items (or variables)
m: rv_discrete
Probability distribution for the number of knapsacks (or constraints)
w: rv_continuous
Probability distribution for the item weights
K: rv_continuous
Probability distribution for the profit correlation coefficient
u: rv_continuous
Probability distribution for the profit multiplier
alpha: rv_continuous
Probability distribution for the tightness ratio
fix_w: boolean
If true, weights are kept the same (minus the noise from w_jitter) in all instances
w_jitter: rv_continuous
Probability distribution for random noise added to the weights
round: boolean
If true, all prices, weights and capacities are rounded to the nearest integer
&#34;&#34;&#34;
assert isinstance(n, rv_frozen), &#34;n should be a SciPy probability distribution&#34;
assert isinstance(m, rv_frozen), &#34;m should be a SciPy probability distribution&#34;
assert isinstance(w, rv_frozen), &#34;w should be a SciPy probability distribution&#34;
assert isinstance(K, rv_frozen), &#34;K should be a SciPy probability distribution&#34;
assert isinstance(u, rv_frozen), &#34;u should be a SciPy probability distribution&#34;
assert isinstance(
alpha, rv_frozen
), &#34;alpha should be a SciPy probability distribution&#34;
assert isinstance(fix_w, bool), &#34;fix_w should be boolean&#34;
assert isinstance(
w_jitter, rv_frozen
), &#34;w_jitter should be a SciPy probability distribution&#34;
self.n = n
self.m = m
self.w = w
self.K = K
self.u = u
self.alpha = alpha
self.w_jitter = w_jitter
self.round = round
if fix_w:
self.fix_n = self.n.rvs()
self.fix_m = self.m.rvs()
self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
self.fix_u = self.u.rvs(self.fix_n)
self.fix_K = self.K.rvs()
else:
self.fix_n = None
self.fix_m = None
self.fix_w = None
self.fix_u = None
self.fix_K = None
def generate(self, n_samples):
def _sample():
if self.fix_w is not None:
n = self.fix_n
m = self.fix_m
w = self.fix_w
u = self.fix_u
K = self.fix_K
else:
n = self.n.rvs()
m = self.m.rvs()
w = np.array([self.w.rvs(n) for _ in range(m)])
u = self.u.rvs(n)
K = self.K.rvs()
w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
alpha = self.alpha.rvs(m)
p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
if self.round:
p = p.round()
b = b.round()
w = w.round()
return MultiKnapsackInstance(p, b, w)
return [_sample() for _ in range(n_samples)]</code></pre>
</details>
<h3>Methods</h3>
<dl>
<dt id="miplearn.problems.knapsack.MultiKnapsackGenerator.generate"><code class="name flex">
<span>def <span class="ident">generate</span></span>(<span>self, n_samples)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def generate(self, n_samples):
def _sample():
if self.fix_w is not None:
n = self.fix_n
m = self.fix_m
w = self.fix_w
u = self.fix_u
K = self.fix_K
else:
n = self.n.rvs()
m = self.m.rvs()
w = np.array([self.w.rvs(n) for _ in range(m)])
u = self.u.rvs(n)
K = self.K.rvs()
w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
alpha = self.alpha.rvs(m)
p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
if self.round:
p = p.round()
b = b.round()
w = w.round()
return MultiKnapsackInstance(p, b, w)
return [_sample() for _ in range(n_samples)]</code></pre>
</details>
</dd>
</dl>
</dd>
<dt id="miplearn.problems.knapsack.MultiKnapsackInstance"><code class="flex name class">
<span>class <span class="ident">MultiKnapsackInstance</span></span>
<span>(</span><span>prices, capacities, weights)</span>
</code></dt>
<dd>
<section class="desc"><p>Representation of the Multidimensional 0-1 Knapsack Problem.</p>
<p>Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
sum(prices[i] for i in S). If selected, each item i occupies weights[j,i] units of space in
each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].</p>
<p>This implementation assigns a different category for each decision variable, and therefore
trains one ML model per variable. It is only suitable when training and test instances have
the same size and items don't shuffle around.</p></section>
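<p>A minimal construction sketch with illustrative data; per the constructor assertions, <code>weights</code> must be an <code>(m, n)</code> NumPy array, with <code>prices</code> of length <code>n</code> and <code>capacities</code> of length <code>m</code>:</p>
<pre><code class="python">import numpy as np
from miplearn.problems.knapsack import MultiKnapsackInstance

prices = np.array([10.0, 15.0, 7.0])     # one price per item (n = 3)
capacities = np.array([20.0, 25.0])      # one capacity per knapsack (m = 2)
weights = np.array([[5.0, 9.0, 4.0],     # weights[j, i]: space taken by item i in knapsack j
                    [6.0, 8.0, 7.0]])

instance = MultiKnapsackInstance(prices, capacities, weights)
model = instance.to_model()              # Pyomo ConcreteModel with binary variables x[i]</code></pre>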
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class MultiKnapsackInstance(Instance):
&#34;&#34;&#34;Representation of the Multidimensional 0-1 Knapsack Problem.
Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
sum(prices[i] for i in S). If selected, each item i occupies weights[j,i] units of space in
each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].
This implementation assigns a different category for each decision variable, and therefore
trains one ML model per variable. It is only suitable when training and test instances have
the same size and items don&#39;t shuffle around.
&#34;&#34;&#34;
def __init__(self, prices, capacities, weights):
super().__init__()
assert isinstance(prices, np.ndarray)
assert isinstance(capacities, np.ndarray)
assert isinstance(weights, np.ndarray)
assert len(weights.shape) == 2
self.m, self.n = weights.shape
assert prices.shape == (self.n,)
assert capacities.shape == (self.m,)
self.prices = prices
self.capacities = capacities
self.weights = weights
def to_model(self):
model = pe.ConcreteModel()
model.x = pe.Var(range(self.n), domain=pe.Binary)
model.OBJ = pe.Objective(
rule=lambda model: sum(model.x[j] * self.prices[j] for j in range(self.n)),
sense=pe.maximize,
)
model.eq_capacity = pe.ConstraintList()
for i in range(self.m):
model.eq_capacity.add(
sum(model.x[j] * self.weights[i, j] for j in range(self.n))
&lt;= self.capacities[i]
)
return model
def get_instance_features(self):
return np.hstack(
[
np.mean(self.prices),
self.capacities,
]
)
def get_variable_features(self, var, index):
return np.hstack(
[
self.prices[index],
self.weights[:, index],
]
)</code></pre>
</details>
<h3>Ancestors</h3>
<ul class="hlist">
<li><a title="miplearn.instance.Instance" href="../instance.html#miplearn.instance.Instance">Instance</a></li>
<li>abc.ABC</li>
</ul>
<h3>Inherited members</h3>
<ul class="hlist">
<li><code><b><a title="miplearn.instance.Instance" href="../instance.html#miplearn.instance.Instance">Instance</a></b></code>:
<ul class="hlist">
<li><code><a title="miplearn.instance.Instance.build_lazy_constraint" href="../instance.html#miplearn.instance.Instance.build_lazy_constraint">build_lazy_constraint</a></code></li>
<li><code><a title="miplearn.instance.Instance.find_violated_lazy_constraints" href="../instance.html#miplearn.instance.Instance.find_violated_lazy_constraints">find_violated_lazy_constraints</a></code></li>
<li><code><a title="miplearn.instance.Instance.get_instance_features" href="../instance.html#miplearn.instance.Instance.get_instance_features">get_instance_features</a></code></li>
<li><code><a title="miplearn.instance.Instance.get_variable_category" href="../instance.html#miplearn.instance.Instance.get_variable_category">get_variable_category</a></code></li>
<li><code><a title="miplearn.instance.Instance.get_variable_features" href="../instance.html#miplearn.instance.Instance.get_variable_features">get_variable_features</a></code></li>
<li><code><a title="miplearn.instance.Instance.to_model" href="../instance.html#miplearn.instance.Instance.to_model">to_model</a></code></li>
</ul>
</li>
</ul>
</dd>
</dl>
</section>
</article>
<nav id="sidebar">
<h1>Index</h1>
<div class="toc">
<ul></ul>
</div>
<ul id="index">
<li><h3>Super-module</h3>
<ul>
<li><code><a title="miplearn.problems" href="index.html">miplearn.problems</a></code></li>
</ul>
</li>
<li><h3><a href="#header-classes">Classes</a></h3>
<ul>
<li>
<h4><code><a title="miplearn.problems.knapsack.ChallengeA" href="#miplearn.problems.knapsack.ChallengeA">ChallengeA</a></code></h4>
</li>
<li>
<h4><code><a title="miplearn.problems.knapsack.GurobiKnapsackInstance" href="#miplearn.problems.knapsack.GurobiKnapsackInstance">GurobiKnapsackInstance</a></code></h4>
</li>
<li>
<h4><code><a title="miplearn.problems.knapsack.KnapsackInstance" href="#miplearn.problems.knapsack.KnapsackInstance">KnapsackInstance</a></code></h4>
</li>
<li>
<h4><code><a title="miplearn.problems.knapsack.MultiKnapsackGenerator" href="#miplearn.problems.knapsack.MultiKnapsackGenerator">MultiKnapsackGenerator</a></code></h4>
<ul class="">
<li><code><a title="miplearn.problems.knapsack.MultiKnapsackGenerator.generate" href="#miplearn.problems.knapsack.MultiKnapsackGenerator.generate">generate</a></code></li>
</ul>
</li>
<li>
<h4><code><a title="miplearn.problems.knapsack.MultiKnapsackInstance" href="#miplearn.problems.knapsack.MultiKnapsackInstance">MultiKnapsackInstance</a></code></h4>
</li>
</ul>
</li>
</ul>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>
</body>
</html>