Update 0.2 docs

docs
Alinson S. Xavier 5 years ago
parent 894f4b4668
commit 144523a5c0

@ -82,12 +82,6 @@
<li >
<a href="/benchmark/">Benchmark</a>
</li>
<li >
<a href="/problems/">Problems</a>
</li>

@ -82,12 +82,6 @@
<li >
<a href="../benchmark/">Benchmark</a>
</li>
<li >
<a href="../problems/">Problems</a>
</li>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.benchmark API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -30,40 +30,71 @@
import logging
import os
from copy import deepcopy
from typing import Dict, Union, List
import pandas as pd
from tqdm.auto import tqdm
from miplearn.instance import Instance
from miplearn.solvers.learning import LearningSolver
from miplearn.types import LearningSolveStats
class BenchmarkRunner:
def __init__(self, solvers):
assert isinstance(solvers, dict)
for solver in solvers.values():
assert isinstance(solver, LearningSolver)
self.solvers = solvers
self.results = None
def solve(self, instances, tee=False):
for (solver_name, solver) in self.solvers.items():
for i in tqdm(range(len((instances)))):
results = solver.solve(deepcopy(instances[i]), tee=tee)
self._push_result(
results,
solver=solver,
solver_name=solver_name,
instance=i,
&#34;&#34;&#34;
Utility class that simplifies the task of comparing the performance of different
solvers.
Example
-------
```python
benchmark = BenchmarkRunner({
&#34;Baseline&#34;: LearningSolver(...),
&#34;Strategy A&#34;: LearningSolver(...),
&#34;Strategy B&#34;: LearningSolver(...),
&#34;Strategy C&#34;: LearningSolver(...),
})
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=5)
benchmark.save_results(&#34;result.csv&#34;)
```
Parameters
----------
solvers: Dict[str, LearningSolver]
Dictionary containing the solvers to compare. Solvers may have different
arguments and components. The key should be the name of the solver. It
appears in the exported tables of results.
&#34;&#34;&#34;
def __init__(self, solvers: Dict[str, LearningSolver]) -&gt; None:
self.solvers: Dict[str, LearningSolver] = solvers
self.results = pd.DataFrame(
columns=[
&#34;Solver&#34;,
&#34;Instance&#34;,
]
)
def parallel_solve(
self,
instances,
n_jobs=1,
n_trials=1,
index_offset=0,
):
instances: Union[List[str], List[Instance]],
n_jobs: int = 1,
n_trials: int = 3,
) -&gt; None:
&#34;&#34;&#34;
Solves the given instances in parallel and collect benchmark statistics.
Parameters
----------
instances: Union[List[str], List[Instance]]
List of instances to solve. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
n_jobs: int
List of instances to solve in parallel at a time.
n_trials: int
How many times each instance should be solved.
&#34;&#34;&#34;
self._silence_miplearn_logger()
trials = instances * n_trials
for (solver_name, solver) in self.solvers.items():
@ -74,69 +105,45 @@ class BenchmarkRunner:
discard_outputs=True,
)
for i in range(len(trials)):
idx = (i % len(instances)) + index_offset
self._push_result(
results[i],
solver=solver,
solver_name=solver_name,
instance=idx,
)
idx = i % len(instances)
results[i][&#34;Solver&#34;] = solver_name
results[i][&#34;Instance&#34;] = idx
self.results = self.results.append(pd.DataFrame([results[i]]))
self._restore_miplearn_logger()
def raw_results(self):
return self.results
def write_csv(self, filename: str) -&gt; None:
&#34;&#34;&#34;
Writes the collected results to a CSV file.
def save_results(self, filename):
Parameters
----------
filename: str
The name of the file.
&#34;&#34;&#34;
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.results.to_csv(filename)
def load_results(self, filename):
self.results = pd.concat([self.results, pd.read_csv(filename, index_col=0)])
def fit(self, instances: Union[List[str], List[Instance]]) -&gt; None:
&#34;&#34;&#34;
Trains all solvers with the provided training instances.
def load_state(self, filename):
for (solver_name, solver) in self.solvers.items():
solver.load_state(filename)
Parameters
----------
instances: Union[List[str], List[Instance]]
List of training instances. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
def fit(self, training_instances):
&#34;&#34;&#34;
for (solver_name, solver) in self.solvers.items():
solver.fit(training_instances)
@staticmethod
def _compute_gap(ub, lb):
if lb is None or ub is None or lb * ub &lt; 0:
# solver did not find a solution and/or bound, use maximum gap possible
return 1.0
elif abs(ub - lb) &lt; 1e-6:
# avoid division by zero when ub = lb = 0
return 0.0
else:
# divide by max(abs(ub),abs(lb)) to ensure gap &lt;= 1
return (ub - lb) / max(abs(ub), abs(lb))
def _push_result(self, result, solver, solver_name, instance):
if self.results is None:
self.results = pd.DataFrame(
# Show the following columns first in the CSV file
columns=[
&#34;Solver&#34;,
&#34;Instance&#34;,
]
)
result[&#34;Solver&#34;] = solver_name
result[&#34;Instance&#34;] = instance
result[&#34;Gap&#34;] = self._compute_gap(
ub=result[&#34;Upper bound&#34;],
lb=result[&#34;Lower bound&#34;],
)
result[&#34;Mode&#34;] = solver.mode
self.results = self.results.append(pd.DataFrame([result]))
solver.fit(instances)
def _silence_miplearn_logger(self):
def _silence_miplearn_logger(self) -&gt; None:
miplearn_logger = logging.getLogger(&#34;miplearn&#34;)
self.prev_log_level = miplearn_logger.getEffectiveLevel()
miplearn_logger.setLevel(logging.WARNING)
def _restore_miplearn_logger(self):
def _restore_miplearn_logger(self) -&gt; None:
miplearn_logger = logging.getLogger(&#34;miplearn&#34;)
miplearn_logger.setLevel(self.prev_log_level)</code></pre>
</details>
@ -155,37 +162,86 @@ class BenchmarkRunner:
<span>(</span><span>solvers)</span>
</code></dt>
<dd>
<section class="desc"></section>
<section class="desc"><p>Utility class that simplifies the task of comparing the performance of different
solvers.</p>
<h2 id="example">Example</h2>
<pre><code class="language-python">benchmark = BenchmarkRunner({
&quot;Baseline&quot;: LearningSolver(...),
&quot;Strategy A&quot;: LearningSolver(...),
&quot;Strategy B&quot;: LearningSolver(...),
&quot;Strategy C&quot;: LearningSolver(...),
})
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=5)
benchmark.save_results(&quot;result.csv&quot;)
</code></pre>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>solvers</code></strong> :&ensp;<code>Dict</code>[<code>str</code>, <code>LearningSolver</code>]</dt>
<dd>Dictionary containing the solvers to compare. Solvers may have different
arguments and components. The key should be the name of the solver. It
appears in the exported tables of results.</dd>
</dl></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class BenchmarkRunner:
def __init__(self, solvers):
assert isinstance(solvers, dict)
for solver in solvers.values():
assert isinstance(solver, LearningSolver)
self.solvers = solvers
self.results = None
def solve(self, instances, tee=False):
for (solver_name, solver) in self.solvers.items():
for i in tqdm(range(len((instances)))):
results = solver.solve(deepcopy(instances[i]), tee=tee)
self._push_result(
results,
solver=solver,
solver_name=solver_name,
instance=i,
&#34;&#34;&#34;
Utility class that simplifies the task of comparing the performance of different
solvers.
Example
-------
```python
benchmark = BenchmarkRunner({
&#34;Baseline&#34;: LearningSolver(...),
&#34;Strategy A&#34;: LearningSolver(...),
&#34;Strategy B&#34;: LearningSolver(...),
&#34;Strategy C&#34;: LearningSolver(...),
})
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=5)
benchmark.save_results(&#34;result.csv&#34;)
```
Parameters
----------
solvers: Dict[str, LearningSolver]
Dictionary containing the solvers to compare. Solvers may have different
arguments and components. The key should be the name of the solver. It
appears in the exported tables of results.
&#34;&#34;&#34;
def __init__(self, solvers: Dict[str, LearningSolver]) -&gt; None:
self.solvers: Dict[str, LearningSolver] = solvers
self.results = pd.DataFrame(
columns=[
&#34;Solver&#34;,
&#34;Instance&#34;,
]
)
def parallel_solve(
self,
instances,
n_jobs=1,
n_trials=1,
index_offset=0,
):
instances: Union[List[str], List[Instance]],
n_jobs: int = 1,
n_trials: int = 3,
) -&gt; None:
&#34;&#34;&#34;
Solves the given instances in parallel and collect benchmark statistics.
Parameters
----------
instances: Union[List[str], List[Instance]]
List of instances to solve. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
n_jobs: int
List of instances to solve in parallel at a time.
n_trials: int
How many times each instance should be solved.
&#34;&#34;&#34;
self._silence_miplearn_logger()
trials = instances * n_trials
for (solver_name, solver) in self.solvers.items():
@ -196,131 +252,122 @@ class BenchmarkRunner:
discard_outputs=True,
)
for i in range(len(trials)):
idx = (i % len(instances)) + index_offset
self._push_result(
results[i],
solver=solver,
solver_name=solver_name,
instance=idx,
)
idx = i % len(instances)
results[i][&#34;Solver&#34;] = solver_name
results[i][&#34;Instance&#34;] = idx
self.results = self.results.append(pd.DataFrame([results[i]]))
self._restore_miplearn_logger()
def raw_results(self):
return self.results
def write_csv(self, filename: str) -&gt; None:
&#34;&#34;&#34;
Writes the collected results to a CSV file.
def save_results(self, filename):
Parameters
----------
filename: str
The name of the file.
&#34;&#34;&#34;
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.results.to_csv(filename)
def load_results(self, filename):
self.results = pd.concat([self.results, pd.read_csv(filename, index_col=0)])
def fit(self, instances: Union[List[str], List[Instance]]) -&gt; None:
&#34;&#34;&#34;
Trains all solvers with the provided training instances.
def load_state(self, filename):
for (solver_name, solver) in self.solvers.items():
solver.load_state(filename)
Parameters
----------
instances: Union[List[str], List[Instance]]
List of training instances. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
def fit(self, training_instances):
&#34;&#34;&#34;
for (solver_name, solver) in self.solvers.items():
solver.fit(training_instances)
@staticmethod
def _compute_gap(ub, lb):
if lb is None or ub is None or lb * ub &lt; 0:
# solver did not find a solution and/or bound, use maximum gap possible
return 1.0
elif abs(ub - lb) &lt; 1e-6:
# avoid division by zero when ub = lb = 0
return 0.0
else:
# divide by max(abs(ub),abs(lb)) to ensure gap &lt;= 1
return (ub - lb) / max(abs(ub), abs(lb))
def _push_result(self, result, solver, solver_name, instance):
if self.results is None:
self.results = pd.DataFrame(
# Show the following columns first in the CSV file
columns=[
&#34;Solver&#34;,
&#34;Instance&#34;,
]
)
result[&#34;Solver&#34;] = solver_name
result[&#34;Instance&#34;] = instance
result[&#34;Gap&#34;] = self._compute_gap(
ub=result[&#34;Upper bound&#34;],
lb=result[&#34;Lower bound&#34;],
)
result[&#34;Mode&#34;] = solver.mode
self.results = self.results.append(pd.DataFrame([result]))
solver.fit(instances)
def _silence_miplearn_logger(self):
def _silence_miplearn_logger(self) -&gt; None:
miplearn_logger = logging.getLogger(&#34;miplearn&#34;)
self.prev_log_level = miplearn_logger.getEffectiveLevel()
miplearn_logger.setLevel(logging.WARNING)
def _restore_miplearn_logger(self):
def _restore_miplearn_logger(self) -&gt; None:
miplearn_logger = logging.getLogger(&#34;miplearn&#34;)
miplearn_logger.setLevel(self.prev_log_level)</code></pre>
</details>
<h3>Methods</h3>
<dl>
<dt id="miplearn.benchmark.BenchmarkRunner.fit"><code class="name flex">
<span>def <span class="ident">fit</span></span>(<span>self, training_instances)</span>
<span>def <span class="ident">fit</span></span>(<span>self, instances)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def fit(self, training_instances):
for (solver_name, solver) in self.solvers.items():
solver.fit(training_instances)</code></pre>
</details>
</dd>
<dt id="miplearn.benchmark.BenchmarkRunner.load_results"><code class="name flex">
<span>def <span class="ident">load_results</span></span>(<span>self, filename)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def load_results(self, filename):
self.results = pd.concat([self.results, pd.read_csv(filename, index_col=0)])</code></pre>
</details>
</dd>
<dt id="miplearn.benchmark.BenchmarkRunner.load_state"><code class="name flex">
<span>def <span class="ident">load_state</span></span>(<span>self, filename)</span>
</code></dt>
<dd>
<section class="desc"></section>
<section class="desc"><p>Trains all solvers with the provided training instances.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>instances</code></strong> :&ensp; <code>Union</code>[<code>List</code>[<code>str</code>], <code>List</code>[<code>Instance</code>]]</dt>
<dd>List of training instances. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.</dd>
</dl></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def load_state(self, filename):
<pre><code class="python">def fit(self, instances: Union[List[str], List[Instance]]) -&gt; None:
&#34;&#34;&#34;
Trains all solvers with the provided training instances.
Parameters
----------
instances: Union[List[str], List[Instance]]
List of training instances. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
&#34;&#34;&#34;
for (solver_name, solver) in self.solvers.items():
solver.load_state(filename)</code></pre>
solver.fit(instances)</code></pre>
</details>
</dd>
<dt id="miplearn.benchmark.BenchmarkRunner.parallel_solve"><code class="name flex">
<span>def <span class="ident">parallel_solve</span></span>(<span>self, instances, n_jobs=1, n_trials=1, index_offset=0)</span>
<span>def <span class="ident">parallel_solve</span></span>(<span>self, instances, n_jobs=1, n_trials=3)</span>
</code></dt>
<dd>
<section class="desc"></section>
<section class="desc"><p>Solves the given instances in parallel and collect benchmark statistics.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>instances</code></strong> :&ensp;<code>Union</code>[<code>List</code>[<code>str</code>], <code>List</code>[<code>Instance</code>]]</dt>
<dd>List of instances to solve. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.</dd>
<dt><strong><code>n_jobs</code></strong> :&ensp;<code>int</code></dt>
<dd>List of instances to solve in parallel at a time.</dd>
<dt><strong><code>n_trials</code></strong> :&ensp;<code>int</code></dt>
<dd>How many times each instance should be solved.</dd>
</dl></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def parallel_solve(
self,
instances,
n_jobs=1,
n_trials=1,
index_offset=0,
):
instances: Union[List[str], List[Instance]],
n_jobs: int = 1,
n_trials: int = 3,
) -&gt; None:
&#34;&#34;&#34;
Solves the given instances in parallel and collect benchmark statistics.
Parameters
----------
instances: Union[List[str], List[Instance]]
List of instances to solve. This can either be a list of instances
already loaded in memory, or a list of filenames pointing to pickled (and
optionally gzipped) files.
n_jobs: int
List of instances to solve in parallel at a time.
n_trials: int
How many times each instance should be solved.
&#34;&#34;&#34;
self._silence_miplearn_logger()
trials = instances * n_trials
for (solver_name, solver) in self.solvers.items():
@ -331,64 +378,40 @@ class BenchmarkRunner:
discard_outputs=True,
)
for i in range(len(trials)):
idx = (i % len(instances)) + index_offset
self._push_result(
results[i],
solver=solver,
solver_name=solver_name,
instance=idx,
)
idx = i % len(instances)
results[i][&#34;Solver&#34;] = solver_name
results[i][&#34;Instance&#34;] = idx
self.results = self.results.append(pd.DataFrame([results[i]]))
self._restore_miplearn_logger()</code></pre>
</details>
</dd>
<dt id="miplearn.benchmark.BenchmarkRunner.raw_results"><code class="name flex">
<span>def <span class="ident">raw_results</span></span>(<span>self)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def raw_results(self):
return self.results</code></pre>
</details>
</dd>
<dt id="miplearn.benchmark.BenchmarkRunner.save_results"><code class="name flex">
<span>def <span class="ident">save_results</span></span>(<span>self, filename)</span>
<dt id="miplearn.benchmark.BenchmarkRunner.write_csv"><code class="name flex">
<span>def <span class="ident">write_csv</span></span>(<span>self, filename)</span>
</code></dt>
<dd>
<section class="desc"></section>
<section class="desc"><p>Writes the collected results to a CSV file.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>filename</code></strong> :&ensp;<code>str</code></dt>
<dd>The name of the file.</dd>
</dl></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def save_results(self, filename):
<pre><code class="python">def write_csv(self, filename: str) -&gt; None:
&#34;&#34;&#34;
Writes the collected results to a CSV file.
Parameters
----------
filename: str
The name of the file.
&#34;&#34;&#34;
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.results.to_csv(filename)</code></pre>
</details>
</dd>
<dt id="miplearn.benchmark.BenchmarkRunner.solve"><code class="name flex">
<span>def <span class="ident">solve</span></span>(<span>self, instances, tee=False)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def solve(self, instances, tee=False):
for (solver_name, solver) in self.solvers.items():
for i in tqdm(range(len((instances)))):
results = solver.solve(deepcopy(instances[i]), tee=tee)
self._push_result(
results,
solver=solver,
solver_name=solver_name,
instance=i,
)</code></pre>
</details>
</dd>
</dl>
</dd>
</dl>
@ -409,14 +432,10 @@ class BenchmarkRunner:
<ul>
<li>
<h4><code><a title="miplearn.benchmark.BenchmarkRunner" href="#miplearn.benchmark.BenchmarkRunner">BenchmarkRunner</a></code></h4>
<ul class="two-column">
<ul class="">
<li><code><a title="miplearn.benchmark.BenchmarkRunner.fit" href="#miplearn.benchmark.BenchmarkRunner.fit">fit</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.load_results" href="#miplearn.benchmark.BenchmarkRunner.load_results">load_results</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.load_state" href="#miplearn.benchmark.BenchmarkRunner.load_state">load_state</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.parallel_solve" href="#miplearn.benchmark.BenchmarkRunner.parallel_solve">parallel_solve</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.raw_results" href="#miplearn.benchmark.BenchmarkRunner.raw_results">raw_results</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.save_results" href="#miplearn.benchmark.BenchmarkRunner.save_results">save_results</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.solve" href="#miplearn.benchmark.BenchmarkRunner.solve">solve</a></code></li>
<li><code><a title="miplearn.benchmark.BenchmarkRunner.write_csv" href="#miplearn.benchmark.BenchmarkRunner.write_csv">write_csv</a></code></li>
</ul>
</li>
</ul>
@ -425,7 +444,7 @@ class BenchmarkRunner:
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.adaptive API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -106,7 +106,7 @@ class AdaptiveClassifier(Classifier):
<dl>
<dt id="miplearn.classifiers.adaptive.AdaptiveClassifier"><code class="flex name class">
<span>class <span class="ident">AdaptiveClassifier</span></span>
<span>(</span><span>candidates=None, evaluator=<miplearn.classifiers.evaluator.ClassifierEvaluator object>)</span>
<span>(</span><span>candidates=None, evaluator=&lt;miplearn.classifiers.evaluator.ClassifierEvaluator object&gt;)</span>
</code></dt>
<dd>
<section class="desc"><p>A meta-classifier which dynamically selects what actual classifier to use
@ -241,7 +241,7 @@ based on its cross-validation score on a particular training data set.</p>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.counting API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -159,7 +159,7 @@ counts how many times each label appeared, hence the name.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.cv API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -308,7 +308,7 @@ acceptable. Other numbers are a linear interpolation of these two extremes.</p><
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.evaluator API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -115,7 +115,7 @@ class ClassifierEvaluator:
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -97,7 +97,6 @@ class Regressor(ABC):
<dl>
<dt id="miplearn.classifiers.Classifier"><code class="flex name class">
<span>class <span class="ident">Classifier</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Helper class that provides a standard way to create an ABC using
@ -127,8 +126,8 @@ inheritance.</p></section>
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.classifiers.counting.CountingClassifier" href="counting.html#miplearn.classifiers.counting.CountingClassifier">CountingClassifier</a></li>
<li><a title="miplearn.classifiers.adaptive.AdaptiveClassifier" href="adaptive.html#miplearn.classifiers.adaptive.AdaptiveClassifier">AdaptiveClassifier</a></li>
<li><a title="miplearn.classifiers.counting.CountingClassifier" href="counting.html#miplearn.classifiers.counting.CountingClassifier">CountingClassifier</a></li>
<li><a title="miplearn.classifiers.cv.CrossValidatedClassifier" href="cv.html#miplearn.classifiers.cv.CrossValidatedClassifier">CrossValidatedClassifier</a></li>
</ul>
<h3>Methods</h3>
@ -181,7 +180,6 @@ def predict_proba(self, x_test):
</dd>
<dt id="miplearn.classifiers.Regressor"><code class="flex name class">
<span>class <span class="ident">Regressor</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Helper class that provides a standard way to create an ABC using
@ -282,7 +280,7 @@ def predict(self):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.tests API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -80,7 +80,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.tests.test_counting API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -93,7 +93,7 @@ def test_counting():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.tests.test_cv API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -153,7 +153,7 @@ def test_cv():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.tests.test_evaluator API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -99,7 +99,7 @@ def test_evaluator():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.tests.test_threshold API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -133,7 +133,7 @@ def test_threshold_dynamic():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.classifiers.threshold API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -93,7 +93,6 @@ class MinPrecisionThreshold(DynamicThreshold):
<dl>
<dt id="miplearn.classifiers.threshold.DynamicThreshold"><code class="flex name class">
<span>class <span class="ident">DynamicThreshold</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Helper class that provides a standard way to create an ABC using
@ -238,7 +237,7 @@ positive rate (also known as precision).</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.component API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -32,7 +32,7 @@ from abc import ABC, abstractmethod
from typing import Any, List, Union, TYPE_CHECKING
from miplearn.instance import Instance
from miplearn.types import MIPSolveStats, TrainingSample
from miplearn.types import LearningSolveStats, TrainingSample
if TYPE_CHECKING:
from miplearn.solvers.learning import LearningSolver
@ -73,7 +73,7 @@ class Component(ABC):
solver: &#34;LearningSolver&#34;,
instance: Instance,
model: Any,
stats: MIPSolveStats,
stats: LearningSolveStats,
training_data: TrainingSample,
) -&gt; None:
&#34;&#34;&#34;
@ -87,13 +87,13 @@ class Component(ABC):
The instance being solved.
model: Any
The concrete optimization model being solved.
stats: dict
stats: LearningSolveStats
A dictionary containing statistics about the solution process, such as
number of nodes explored and running time. Components are free to add
their own statistics here. For example, PrimalSolutionComponent adds
statistics regarding the number of predicted variables. All statistics in
this dictionary are exported to the benchmark CSV file.
training_data: dict
training_data: TrainingSample
A dictionary containing data that may be useful for training machine
learning models and accelerating the solution process. Components are
free to add their own training data here. For example,
@ -156,7 +156,6 @@ class Component(ABC):
<dl>
<dt id="miplearn.components.component.Component"><code class="flex name class">
<span>class <span class="ident">Component</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>A Component is an object which adds functionality to a LearningSolver.</p>
@ -202,7 +201,7 @@ strategy.</p></section>
solver: &#34;LearningSolver&#34;,
instance: Instance,
model: Any,
stats: MIPSolveStats,
stats: LearningSolveStats,
training_data: TrainingSample,
) -&gt; None:
&#34;&#34;&#34;
@ -216,13 +215,13 @@ strategy.</p></section>
The instance being solved.
model: Any
The concrete optimization model being solved.
stats: dict
stats: LearningSolveStats
A dictionary containing statistics about the solution process, such as
number of nodes explored and running time. Components are free to add
their own statistics here. For example, PrimalSolutionComponent adds
statistics regarding the number of predicted variables. All statistics in
this dictionary are exported to the benchmark CSV file.
training_data: dict
training_data: TrainingSample
A dictionary containing data that may be useful for training machine
learning models and accelerating the solution process. Components are
free to add their own training data here. For example,
@ -279,16 +278,16 @@ strategy.</p></section>
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.components.composite.CompositeComponent" href="composite.html#miplearn.components.composite.CompositeComponent">CompositeComponent</a></li>
<li><a title="miplearn.components.cuts.UserCutsComponent" href="cuts.html#miplearn.components.cuts.UserCutsComponent">UserCutsComponent</a></li>
<li><a title="miplearn.components.lazy_dynamic.DynamicLazyConstraintsComponent" href="lazy_dynamic.html#miplearn.components.lazy_dynamic.DynamicLazyConstraintsComponent">DynamicLazyConstraintsComponent</a></li>
<li><a title="miplearn.components.lazy_static.StaticLazyConstraintsComponent" href="lazy_static.html#miplearn.components.lazy_static.StaticLazyConstraintsComponent">StaticLazyConstraintsComponent</a></li>
<li><a title="miplearn.components.objective.ObjectiveValueComponent" href="objective.html#miplearn.components.objective.ObjectiveValueComponent">ObjectiveValueComponent</a></li>
<li><a title="miplearn.components.primal.PrimalSolutionComponent" href="primal.html#miplearn.components.primal.PrimalSolutionComponent">PrimalSolutionComponent</a></li>
<li><a title="miplearn.components.lazy_static.StaticLazyConstraintsComponent" href="lazy_static.html#miplearn.components.lazy_static.StaticLazyConstraintsComponent">StaticLazyConstraintsComponent</a></li>
<li><a title="miplearn.components.composite.CompositeComponent" href="composite.html#miplearn.components.composite.CompositeComponent">CompositeComponent</a></li>
<li><a title="miplearn.components.steps.drop_redundant.DropRedundantInequalitiesStep" href="steps/drop_redundant.html#miplearn.components.steps.drop_redundant.DropRedundantInequalitiesStep">DropRedundantInequalitiesStep</a></li>
<li><a title="miplearn.components.relaxation.RelaxationComponent" href="relaxation.html#miplearn.components.relaxation.RelaxationComponent">RelaxationComponent</a></li>
<li><a title="miplearn.components.steps.convert_tight.ConvertTightIneqsIntoEqsStep" href="steps/convert_tight.html#miplearn.components.steps.convert_tight.ConvertTightIneqsIntoEqsStep">ConvertTightIneqsIntoEqsStep</a></li>
<li><a title="miplearn.components.steps.drop_redundant.DropRedundantInequalitiesStep" href="steps/drop_redundant.html#miplearn.components.steps.drop_redundant.DropRedundantInequalitiesStep">DropRedundantInequalitiesStep</a></li>
<li><a title="miplearn.components.steps.relax_integrality.RelaxIntegralityStep" href="steps/relax_integrality.html#miplearn.components.steps.relax_integrality.RelaxIntegralityStep">RelaxIntegralityStep</a></li>
<li><a title="miplearn.components.relaxation.RelaxationComponent" href="relaxation.html#miplearn.components.relaxation.RelaxationComponent">RelaxationComponent</a></li>
</ul>
<h3>Methods</h3>
<dl>
@ -305,13 +304,13 @@ strategy.</p></section>
<dd>The instance being solved.</dd>
<dt><strong><code>model</code></strong> :&ensp;<code>Any</code></dt>
<dd>The concrete optimization model being solved.</dd>
<dt><strong><code>stats</code></strong> :&ensp;<code>dict</code></dt>
<dt><strong><code>stats</code></strong> :&ensp;<code>LearningSolveStats</code></dt>
<dd>A dictionary containing statistics about the solution process, such as
number of nodes explored and running time. Components are free to add
their own statistics here. For example, PrimalSolutionComponent adds
statistics regarding the number of predicted variables. All statistics in
this dictionary are exported to the benchmark CSV file.</dd>
<dt><strong><code>training_data</code></strong> :&ensp;<code>dict</code></dt>
<dt><strong><code>training_data</code></strong> :&ensp;<code>TrainingSample</code></dt>
<dd>A dictionary containing data that may be useful for training machine
learning models and accelerating the solution process. Components are
free to add their own training data here. For example,
@ -328,7 +327,7 @@ def after_solve(
solver: &#34;LearningSolver&#34;,
instance: Instance,
model: Any,
stats: MIPSolveStats,
stats: LearningSolveStats,
training_data: TrainingSample,
) -&gt; None:
&#34;&#34;&#34;
@ -342,13 +341,13 @@ def after_solve(
The instance being solved.
model: Any
The concrete optimization model being solved.
stats: dict
stats: LearningSolveStats
A dictionary containing statistics about the solution process, such as
number of nodes explored and running time. Components are free to add
their own statistics here. For example, PrimalSolutionComponent adds
statistics regarding the number of predicted variables. All statistics in
this dictionary are exported to the benchmark CSV file.
training_data: dict
training_data: TrainingSample
A dictionary containing data that may be useful for training machine
learning models and accelerating the solution process. Components are
free to add their own training data here. For example,
@ -518,7 +517,7 @@ ends. If it returns True for any component, the MIP is solved again.</p>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.composite API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -226,7 +226,7 @@ RelaxationComponent for a concrete example.</p>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.cuts API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -378,7 +378,7 @@ class UserCutsComponent(Component):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -203,7 +203,7 @@ def classifier_evaluation_dict(tp, tn, fp, fn):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.lazy_dynamic API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -402,7 +402,7 @@ class DynamicLazyConstraintsComponent(Component):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.lazy_static API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -616,7 +616,7 @@ strategy.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.objective API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -384,7 +384,7 @@ class ObjectiveValueComponent(Component):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.primal API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -214,7 +214,7 @@ class PrimalSolutionComponent(Component):
<dl>
<dt id="miplearn.components.primal.PrimalSolutionComponent"><code class="flex name class">
<span>class <span class="ident">PrimalSolutionComponent</span></span>
<span>(</span><span>classifier=<miplearn.classifiers.adaptive.AdaptiveClassifier object>, mode='exact', threshold=<miplearn.classifiers.threshold.MinPrecisionThreshold object>)</span>
<span>(</span><span>classifier=&lt;miplearn.classifiers.adaptive.AdaptiveClassifier object&gt;, mode='exact', threshold=&lt;miplearn.classifiers.threshold.MinPrecisionThreshold object&gt;)</span>
</code></dt>
<dd>
<section class="desc"><p>A component that predicts primal solutions.</p></section>
@ -602,7 +602,7 @@ class PrimalSolutionComponent(Component):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.relaxation API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -318,7 +318,7 @@ constraint loop.</dd>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps.convert_tight API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -627,7 +627,7 @@ before this component is used.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps.drop_redundant API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -655,7 +655,7 @@ before this component is used.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -72,7 +72,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps.relax_integrality API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -66,7 +66,6 @@ class RelaxIntegralityStep(Component):
<dl>
<dt id="miplearn.components.steps.relax_integrality.RelaxIntegralityStep"><code class="flex name class">
<span>class <span class="ident">RelaxIntegralityStep</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Component that relaxes all integrality constraints before the problem is solved.</p></section>
@ -134,7 +133,7 @@ class RelaxIntegralityStep(Component):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps.tests API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -62,7 +62,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps.tests.test_convert_tight API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -377,7 +377,7 @@ features, which can be provided as inputs to machine learning models.</p></secti
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.steps.tests.test_drop_redundant API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -756,7 +756,7 @@ def test_x_multiple_solves():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.tests API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -85,7 +85,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.tests.test_composite API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -171,7 +171,7 @@ def test_composite():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.tests.test_lazy_dynamic API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -357,7 +357,7 @@ def test_lazy_evaluate():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.tests.test_lazy_static API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -530,7 +530,7 @@ def test_fit():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.tests.test_objective API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -166,7 +166,7 @@ def test_obj_evaluate():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.components.tests.test_primal API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -298,7 +298,7 @@ def test_primal_parallel_fit():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.extractors API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -196,7 +196,6 @@ class ObjectiveValueExtractor(Extractor):
<dl>
<dt id="miplearn.extractors.Extractor"><code class="flex name class">
<span>class <span class="ident">Extractor</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Helper class that provides a standard way to create an ABC using
@ -230,10 +229,10 @@ inheritance.</p></section>
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.extractors.VariableFeaturesExtractor" href="#miplearn.extractors.VariableFeaturesExtractor">VariableFeaturesExtractor</a></li>
<li><a title="miplearn.extractors.SolutionExtractor" href="#miplearn.extractors.SolutionExtractor">SolutionExtractor</a></li>
<li><a title="miplearn.extractors.InstanceFeaturesExtractor" href="#miplearn.extractors.InstanceFeaturesExtractor">InstanceFeaturesExtractor</a></li>
<li><a title="miplearn.extractors.ObjectiveValueExtractor" href="#miplearn.extractors.ObjectiveValueExtractor">ObjectiveValueExtractor</a></li>
<li><a title="miplearn.extractors.SolutionExtractor" href="#miplearn.extractors.SolutionExtractor">SolutionExtractor</a></li>
<li><a title="miplearn.extractors.VariableFeaturesExtractor" href="#miplearn.extractors.VariableFeaturesExtractor">VariableFeaturesExtractor</a></li>
</ul>
<h3>Static methods</h3>
<dl>
@ -282,7 +281,6 @@ def extract(self, instances):
</dd>
<dt id="miplearn.extractors.InstanceFeaturesExtractor"><code class="flex name class">
<span>class <span class="ident">InstanceFeaturesExtractor</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Helper class that provides a standard way to create an ABC using
@ -541,7 +539,6 @@ inheritance.</p></section>
</dd>
<dt id="miplearn.extractors.VariableFeaturesExtractor"><code class="flex name class">
<span>class <span class="ident">VariableFeaturesExtractor</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Helper class that provides a standard way to create an ABC using
@ -672,7 +669,7 @@ inheritance.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -134,7 +134,7 @@ from .solvers.pyomo.gurobi import GurobiPyomoSolver</code></pre>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.instance API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -373,13 +373,13 @@ features, which can be provided as inputs to machine learning models.</p></secti
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.problems.knapsack.MultiKnapsackInstance" href="problems/knapsack.html#miplearn.problems.knapsack.MultiKnapsackInstance">MultiKnapsackInstance</a></li>
<li><a title="miplearn.components.steps.tests.test_convert_tight.SampleInstance" href="components/steps/tests/test_convert_tight.html#miplearn.components.steps.tests.test_convert_tight.SampleInstance">SampleInstance</a></li>
<li><a title="miplearn.problems.knapsack.KnapsackInstance" href="problems/knapsack.html#miplearn.problems.knapsack.KnapsackInstance">KnapsackInstance</a></li>
<li><a title="miplearn.solvers.tests.InfeasiblePyomoInstance" href="solvers/tests/index.html#miplearn.solvers.tests.InfeasiblePyomoInstance">InfeasiblePyomoInstance</a></li>
<li><a title="miplearn.solvers.tests.InfeasibleGurobiInstance" href="solvers/tests/index.html#miplearn.solvers.tests.InfeasibleGurobiInstance">InfeasibleGurobiInstance</a></li>
<li><a title="miplearn.problems.knapsack.MultiKnapsackInstance" href="problems/knapsack.html#miplearn.problems.knapsack.MultiKnapsackInstance">MultiKnapsackInstance</a></li>
<li><a title="miplearn.problems.stab.MaxWeightStableSetInstance" href="problems/stab.html#miplearn.problems.stab.MaxWeightStableSetInstance">MaxWeightStableSetInstance</a></li>
<li><a title="miplearn.problems.tsp.TravelingSalesmanInstance" href="problems/tsp.html#miplearn.problems.tsp.TravelingSalesmanInstance">TravelingSalesmanInstance</a></li>
<li><a title="miplearn.components.steps.tests.test_convert_tight.SampleInstance" href="components/steps/tests/test_convert_tight.html#miplearn.components.steps.tests.test_convert_tight.SampleInstance">SampleInstance</a></li>
<li><a title="miplearn.solvers.tests.InfeasibleGurobiInstance" href="solvers/tests/index.html#miplearn.solvers.tests.InfeasibleGurobiInstance">InfeasibleGurobiInstance</a></li>
<li><a title="miplearn.solvers.tests.InfeasiblePyomoInstance" href="solvers/tests/index.html#miplearn.solvers.tests.InfeasiblePyomoInstance">InfeasiblePyomoInstance</a></li>
</ul>
<h3>Methods</h3>
<dl>
@ -767,7 +767,7 @@ def to_model(self) -&gt; Any:
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.log API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -286,7 +286,7 @@ it is formatted using formatException() and appended to the message.</p></sectio
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -80,7 +80,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.knapsack API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -515,7 +515,7 @@ instead of Pyomo, used for testing.</p></section>
</dd>
<dt id="miplearn.problems.knapsack.MultiKnapsackGenerator"><code class="flex name class">
<span>class <span class="ident">MultiKnapsackGenerator</span></span>
<span>(</span><span>n=<scipy.stats._distn_infrastructure.rv_frozen object>, m=<scipy.stats._distn_infrastructure.rv_frozen object>, w=<scipy.stats._distn_infrastructure.rv_frozen object>, K=<scipy.stats._distn_infrastructure.rv_frozen object>, u=<scipy.stats._distn_infrastructure.rv_frozen object>, alpha=<scipy.stats._distn_infrastructure.rv_frozen object>, fix_w=False, w_jitter=<scipy.stats._distn_infrastructure.rv_frozen object>, round=True)</span>
<span>(</span><span>n=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, m=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, w=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, K=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, u=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, alpha=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, fix_w=False, w_jitter=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, round=True)</span>
</code></dt>
<dd>
<section class="desc"><p>Initialize the problem generator.</p>
@ -873,7 +873,7 @@ same size and items don't shuffle around.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.stab API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -208,7 +208,7 @@ class MaxWeightStableSetInstance(Instance):
</dd>
<dt id="miplearn.problems.stab.MaxWeightStableSetGenerator"><code class="flex name class">
<span>class <span class="ident">MaxWeightStableSetGenerator</span></span>
<span>(</span><span>w=<scipy.stats._distn_infrastructure.rv_frozen object>, n=<scipy.stats._distn_infrastructure.rv_frozen object>, p=<scipy.stats._distn_infrastructure.rv_frozen object>, fix_graph=True)</span>
<span>(</span><span>w=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, n=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, p=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, fix_graph=True)</span>
</code></dt>
<dd>
<section class="desc"><p>Random instance generator for the Maximum-Weight Stable Set Problem.</p>
@ -426,7 +426,7 @@ a subset of vertices, no two of which are adjacent.</p>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.tests API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -75,7 +75,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.tests.test_knapsack API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -107,7 +107,7 @@ def test_knapsack_generator():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.tests.test_stab API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -183,7 +183,7 @@ def test_stab_generator_random_graph():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.tests.test_tsp API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -234,7 +234,7 @@ def test_subtour():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.problems.tsp API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -264,7 +264,7 @@ class TravelingSalesmanInstance(Instance):
</dd>
<dt id="miplearn.problems.tsp.TravelingSalesmanGenerator"><code class="flex name class">
<span>class <span class="ident">TravelingSalesmanGenerator</span></span>
<span>(</span><span>x=<scipy.stats._distn_infrastructure.rv_frozen object>, y=<scipy.stats._distn_infrastructure.rv_frozen object>, n=<scipy.stats._distn_infrastructure.rv_frozen object>, gamma=<scipy.stats._distn_infrastructure.rv_frozen object>, fix_cities=True, round=True)</span>
<span>(</span><span>x=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, y=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, n=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, gamma=&lt;scipy.stats._distn_infrastructure.rv_frozen object&gt;, fix_cities=True, round=True)</span>
</code></dt>
<dd>
<section class="desc"><p>Random generator for the Traveling Salesman Problem.</p>
@ -579,7 +579,7 @@ one of Karp's 21 NP-complete problems.</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.gurobi API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -920,7 +920,7 @@ LP relaxation of that node.</dd>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -115,7 +115,7 @@ class _RedirectOutput:
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.internal API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -303,7 +303,6 @@ class InternalSolver(ABC):
<dl>
<dt id="miplearn.solvers.internal.InternalSolver"><code class="flex name class">
<span>class <span class="ident">InternalSolver</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>Abstract class representing the MIP solver used internally by LearningSolver.</p></section>
@ -561,8 +560,8 @@ class InternalSolver(ABC):
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.solvers.pyomo.base.BasePyomoSolver" href="pyomo/base.html#miplearn.solvers.pyomo.base.BasePyomoSolver">BasePyomoSolver</a></li>
<li><a title="miplearn.solvers.gurobi.GurobiSolver" href="gurobi.html#miplearn.solvers.gurobi.GurobiSolver">GurobiSolver</a></li>
<li><a title="miplearn.solvers.pyomo.base.BasePyomoSolver" href="pyomo/base.html#miplearn.solvers.pyomo.base.BasePyomoSolver">BasePyomoSolver</a></li>
</ul>
<h3>Methods</h3>
<dl>
@ -1134,7 +1133,7 @@ def solve_lp(
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.learning API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -46,7 +46,7 @@ from miplearn.instance import Instance
from miplearn.solvers import _RedirectOutput
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
from miplearn.types import MIPSolveStats, TrainingSample
from miplearn.types import MIPSolveStats, TrainingSample, LearningSolveStats
logger = logging.getLogger(__name__)
@ -153,7 +153,7 @@ class LearningSolver:
output_filename: Optional[str] = None,
discard_output: bool = False,
tee: bool = False,
) -&gt; MIPSolveStats:
) -&gt; LearningSolveStats:
# Load instance from file, if necessary
filename = None
@ -229,15 +229,24 @@ class LearningSolver:
# Solve MILP
logger.info(&#34;Solving MILP...&#34;)
stats = self.internal_solver.solve(
stats = cast(
LearningSolveStats,
self.internal_solver.solve(
tee=tee,
iteration_cb=iteration_cb_wrapper,
lazy_cb=lazy_cb,
),
)
if &#34;LP value&#34; in training_sample.keys():
stats[&#34;LP value&#34;] = training_sample[&#34;LP value&#34;]
stats[&#34;Solver&#34;] = &#34;default&#34;
stats[&#34;Gap&#34;] = self._compute_gap(
ub=stats[&#34;Upper bound&#34;],
lb=stats[&#34;Lower bound&#34;],
)
stats[&#34;Mode&#34;] = self.mode
# Read MIP solution and bounds
# Add some information to training_sample
training_sample[&#34;Lower bound&#34;] = stats[&#34;Lower bound&#34;]
training_sample[&#34;Upper bound&#34;] = stats[&#34;Upper bound&#34;]
training_sample[&#34;MIP log&#34;] = stats[&#34;Log&#34;]
@ -268,7 +277,7 @@ class LearningSolver:
output_filename: Optional[str] = None,
discard_output: bool = False,
tee: bool = False,
) -&gt; MIPSolveStats:
) -&gt; LearningSolveStats:
&#34;&#34;&#34;
Solves the given instance. If trained machine-learning models are
available, they will be used to accelerate the solution process.
@ -301,7 +310,7 @@ class LearningSolver:
Returns
-------
MIPSolveStats
LearningSolveStats
A dictionary of solver statistics containing at least the following
keys: &#34;Lower bound&#34;, &#34;Upper bound&#34;, &#34;Wallclock time&#34;, &#34;Nodes&#34;,
&#34;Sense&#34;, &#34;Log&#34;, &#34;Warm start value&#34; and &#34;LP value&#34;.
@ -337,7 +346,7 @@ class LearningSolver:
label: str = &#34;Solve&#34;,
output_filenames: Optional[List[str]] = None,
discard_outputs: bool = False,
) -&gt; List[MIPSolveStats]:
) -&gt; List[LearningSolveStats]:
&#34;&#34;&#34;
Solves multiple instances in parallel.
@ -364,7 +373,7 @@ class LearningSolver:
Returns
-------
List[MIPSolveStats]
List[LearningSolveStats]
List of solver statistics, with one entry for each provided instance.
The list is the same you would obtain by calling
`[solver.solve(p) for p in instances]`
@ -409,7 +418,19 @@ class LearningSolver:
def __getstate__(self) -&gt; Dict:
self.internal_solver = None
return self.__dict__</code></pre>
return self.__dict__
@staticmethod
def _compute_gap(ub: Optional[float], lb: Optional[float]) -&gt; Optional[float]:
if lb is None or ub is None or lb * ub &lt; 0:
# solver did not find a solution and/or bound
return None
elif abs(ub - lb) &lt; 1e-6:
# avoid division by zero when ub = lb = 0
return 0.0
else:
# divide by max(abs(ub),abs(lb)) to ensure gap &lt;= 1
return (ub - lb) / max(abs(ub), abs(lb))</code></pre>
</details>
</section>
<section>
@ -531,7 +552,7 @@ the theoretical performance of perfect ML models.</dd>
output_filename: Optional[str] = None,
discard_output: bool = False,
tee: bool = False,
) -&gt; MIPSolveStats:
) -&gt; LearningSolveStats:
# Load instance from file, if necessary
filename = None
@ -607,15 +628,24 @@ the theoretical performance of perfect ML models.</dd>
# Solve MILP
logger.info(&#34;Solving MILP...&#34;)
stats = self.internal_solver.solve(
stats = cast(
LearningSolveStats,
self.internal_solver.solve(
tee=tee,
iteration_cb=iteration_cb_wrapper,
lazy_cb=lazy_cb,
),
)
if &#34;LP value&#34; in training_sample.keys():
stats[&#34;LP value&#34;] = training_sample[&#34;LP value&#34;]
stats[&#34;Solver&#34;] = &#34;default&#34;
stats[&#34;Gap&#34;] = self._compute_gap(
ub=stats[&#34;Upper bound&#34;],
lb=stats[&#34;Lower bound&#34;],
)
stats[&#34;Mode&#34;] = self.mode
# Read MIP solution and bounds
# Add some information to training_sample
training_sample[&#34;Lower bound&#34;] = stats[&#34;Lower bound&#34;]
training_sample[&#34;Upper bound&#34;] = stats[&#34;Upper bound&#34;]
training_sample[&#34;MIP log&#34;] = stats[&#34;Log&#34;]
@ -646,7 +676,7 @@ the theoretical performance of perfect ML models.</dd>
output_filename: Optional[str] = None,
discard_output: bool = False,
tee: bool = False,
) -&gt; MIPSolveStats:
) -&gt; LearningSolveStats:
&#34;&#34;&#34;
Solves the given instance. If trained machine-learning models are
available, they will be used to accelerate the solution process.
@ -679,7 +709,7 @@ the theoretical performance of perfect ML models.</dd>
Returns
-------
MIPSolveStats
LearningSolveStats
A dictionary of solver statistics containing at least the following
keys: &#34;Lower bound&#34;, &#34;Upper bound&#34;, &#34;Wallclock time&#34;, &#34;Nodes&#34;,
&#34;Sense&#34;, &#34;Log&#34;, &#34;Warm start value&#34; and &#34;LP value&#34;.
@ -715,7 +745,7 @@ the theoretical performance of perfect ML models.</dd>
label: str = &#34;Solve&#34;,
output_filenames: Optional[List[str]] = None,
discard_outputs: bool = False,
) -&gt; List[MIPSolveStats]:
) -&gt; List[LearningSolveStats]:
&#34;&#34;&#34;
Solves multiple instances in parallel.
@ -742,7 +772,7 @@ the theoretical performance of perfect ML models.</dd>
Returns
-------
List[MIPSolveStats]
List[LearningSolveStats]
List of solver statistics, with one entry for each provided instance.
The list is the same you would obtain by calling
`[solver.solve(p) for p in instances]`
@ -787,7 +817,19 @@ the theoretical performance of perfect ML models.</dd>
def __getstate__(self) -&gt; Dict:
self.internal_solver = None
return self.__dict__</code></pre>
return self.__dict__
@staticmethod
def _compute_gap(ub: Optional[float], lb: Optional[float]) -&gt; Optional[float]:
if lb is None or ub is None or lb * ub &lt; 0:
# solver did not find a solution and/or bound
return None
elif abs(ub - lb) &lt; 1e-6:
# avoid division by zero when ub = lb = 0
return 0.0
else:
# divide by max(abs(ub),abs(lb)) to ensure gap &lt;= 1
return (ub - lb) / max(abs(ub), abs(lb))</code></pre>
</details>
<h3>Methods</h3>
<dl>
@ -808,7 +850,7 @@ the theoretical performance of perfect ML models.</dd>
</details>
</dd>
<dt id="miplearn.solvers.learning.LearningSolver.parallel_solve"><code class="name flex">
<span>def <span class="ident">parallel_solve</span></span>(<span>self, instances, n_jobs=4, label=&#39;Solve&#39;, output_filenames=None, discard_outputs=False)</span>
<span>def <span class="ident">parallel_solve</span></span>(<span>self, instances, n_jobs=4, label='Solve', output_filenames=None, discard_outputs=False)</span>
</code></dt>
<dd>
<section class="desc"><p>Solves multiple instances in parallel.</p>
@ -834,7 +876,7 @@ them instead. Useful during benchmarking.</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>List</code>[<code>MIPSolveStats</code>]</dt>
<dt><code>List</code>[<code>LearningSolveStats</code>]</dt>
<dd>List of solver statistics, with one entry for each provided instance.
The list is the same you would obtain by calling
<code>[solver.solve(p) for p in instances]</code></dd>
@ -850,7 +892,7 @@ The list is the same you would obtain by calling
label: str = &#34;Solve&#34;,
output_filenames: Optional[List[str]] = None,
discard_outputs: bool = False,
) -&gt; List[MIPSolveStats]:
) -&gt; List[LearningSolveStats]:
&#34;&#34;&#34;
Solves multiple instances in parallel.
@ -877,7 +919,7 @@ The list is the same you would obtain by calling
Returns
-------
List[MIPSolveStats]
List[LearningSolveStats]
List of solver statistics, with one entry for each provided instance.
The list is the same you would obtain by calling
`[solver.solve(p) for p in instances]`
@ -933,7 +975,7 @@ them. Useful during benchmarking.</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>MIPSolveStats</code></dt>
<dt><code>LearningSolveStats</code></dt>
<dd>
<p>A dictionary of solver statistics containing at least the following
keys: "Lower bound", "Upper bound", "Wallclock time", "Nodes",
@ -955,7 +997,7 @@ details.</p>
output_filename: Optional[str] = None,
discard_output: bool = False,
tee: bool = False,
) -&gt; MIPSolveStats:
) -&gt; LearningSolveStats:
&#34;&#34;&#34;
Solves the given instance. If trained machine-learning models are
available, they will be used to accelerate the solution process.
@ -988,7 +1030,7 @@ details.</p>
Returns
-------
MIPSolveStats
LearningSolveStats
A dictionary of solver statistics containing at least the following
keys: &#34;Lower bound&#34;, &#34;Upper bound&#34;, &#34;Wallclock time&#34;, &#34;Nodes&#34;,
&#34;Sense&#34;, &#34;Log&#34;, &#34;Warm start value&#34; and &#34;LP value&#34;.
@ -1050,7 +1092,7 @@ details.</p>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.pyomo.base API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -665,8 +665,8 @@ class BasePyomoSolver(InternalSolver):
</ul>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="miplearn.solvers.pyomo.gurobi.GurobiPyomoSolver" href="gurobi.html#miplearn.solvers.pyomo.gurobi.GurobiPyomoSolver">GurobiPyomoSolver</a></li>
<li><a title="miplearn.solvers.pyomo.cplex.CplexPyomoSolver" href="cplex.html#miplearn.solvers.pyomo.cplex.CplexPyomoSolver">CplexPyomoSolver</a></li>
<li><a title="miplearn.solvers.pyomo.gurobi.GurobiPyomoSolver" href="gurobi.html#miplearn.solvers.pyomo.gurobi.GurobiPyomoSolver">GurobiPyomoSolver</a></li>
<li><a title="miplearn.solvers.pyomo.xpress.XpressPyomoSolver" href="xpress.html#miplearn.solvers.pyomo.xpress.XpressPyomoSolver">XpressPyomoSolver</a></li>
</ul>
<h3>Inherited members</h3>
@ -722,7 +722,7 @@ class BasePyomoSolver(InternalSolver):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.pyomo.cplex API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -185,7 +185,7 @@ class CplexPyomoSolver(BasePyomoSolver):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.pyomo.gurobi API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -213,7 +213,7 @@ class GurobiPyomoSolver(BasePyomoSolver):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.pyomo API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -80,7 +80,7 @@
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.pyomo.xpress API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -166,7 +166,7 @@ class XpressPyomoSolver(BasePyomoSolver):
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.tests API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -242,7 +242,7 @@ features, which can be provided as inputs to machine learning models.</p></secti
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.tests.test_internal_solver API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -535,7 +535,7 @@ def test_iteration_cb():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.tests.test_lazy_cb API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -111,7 +111,7 @@ def test_lazy_cb():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.solvers.tests.test_learning_solver API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -155,7 +155,17 @@ def test_simulate_perfect():
simulate_perfect=True,
)
stats = solver.solve(tmp.name)
assert stats[&#34;Lower bound&#34;] == stats[&#34;Predicted LB&#34;]</code></pre>
assert stats[&#34;Lower bound&#34;] == stats[&#34;Predicted LB&#34;]
def test_gap():
assert LearningSolver._compute_gap(ub=0.0, lb=0.0) == 0.0
assert LearningSolver._compute_gap(ub=1.0, lb=0.5) == 0.5
assert LearningSolver._compute_gap(ub=1.0, lb=1.0) == 0.0
assert LearningSolver._compute_gap(ub=1.0, lb=-1.0) is None
assert LearningSolver._compute_gap(ub=1.0, lb=None) is None
assert LearningSolver._compute_gap(ub=None, lb=1.0) is None
assert LearningSolver._compute_gap(ub=None, lb=None) is None</code></pre>
</details>
</section>
<section>
@ -165,6 +175,25 @@ def test_simulate_perfect():
<section>
<h2 class="section-title" id="header-functions">Functions</h2>
<dl>
<dt id="miplearn.solvers.tests.test_learning_solver.test_gap"><code class="name flex">
<span>def <span class="ident">test_gap</span></span>(<span>)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def test_gap():
assert LearningSolver._compute_gap(ub=0.0, lb=0.0) == 0.0
assert LearningSolver._compute_gap(ub=1.0, lb=0.5) == 0.5
assert LearningSolver._compute_gap(ub=1.0, lb=1.0) == 0.0
assert LearningSolver._compute_gap(ub=1.0, lb=-1.0) is None
assert LearningSolver._compute_gap(ub=1.0, lb=None) is None
assert LearningSolver._compute_gap(ub=None, lb=1.0) is None
assert LearningSolver._compute_gap(ub=None, lb=None) is None</code></pre>
</details>
</dd>
<dt id="miplearn.solvers.tests.test_learning_solver.test_learning_solver"><code class="name flex">
<span>def <span class="ident">test_learning_solver</span></span>(<span>)</span>
</code></dt>
@ -346,6 +375,7 @@ def test_simulate_perfect():
</li>
<li><h3><a href="#header-functions">Functions</a></h3>
<ul class="">
<li><code><a title="miplearn.solvers.tests.test_learning_solver.test_gap" href="#miplearn.solvers.tests.test_learning_solver.test_gap">test_gap</a></code></li>
<li><code><a title="miplearn.solvers.tests.test_learning_solver.test_learning_solver" href="#miplearn.solvers.tests.test_learning_solver.test_learning_solver">test_learning_solver</a></code></li>
<li><code><a title="miplearn.solvers.tests.test_learning_solver.test_parallel_solve" href="#miplearn.solvers.tests.test_learning_solver.test_parallel_solve">test_parallel_solve</a></code></li>
<li><code><a title="miplearn.solvers.tests.test_learning_solver.test_simulate_perfect" href="#miplearn.solvers.tests.test_learning_solver.test_simulate_perfect">test_simulate_perfect</a></code></li>
@ -357,7 +387,7 @@ def test_simulate_perfect():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.tests API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -130,7 +130,7 @@ def get_test_pyomo_instances():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.tests.test_benchmark API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -55,24 +55,10 @@ def test_benchmark():
benchmark = BenchmarkRunner(test_solvers)
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
assert benchmark.raw_results().values.shape == (12, 14)
assert benchmark.results.values.shape == (12, 14)
benchmark.save_results(&#34;/tmp/benchmark.csv&#34;)
assert os.path.isfile(&#34;/tmp/benchmark.csv&#34;)
benchmark = BenchmarkRunner(test_solvers)
benchmark.load_results(&#34;/tmp/benchmark.csv&#34;)
assert benchmark.raw_results().values.shape == (12, 14)
def test_gap():
assert BenchmarkRunner._compute_gap(ub=0.0, lb=0.0) == 0.0
assert BenchmarkRunner._compute_gap(ub=1.0, lb=0.5) == 0.5
assert BenchmarkRunner._compute_gap(ub=1.0, lb=1.0) == 0.0
assert BenchmarkRunner._compute_gap(ub=1.0, lb=-1.0) == 1.0
assert BenchmarkRunner._compute_gap(ub=1.0, lb=None) == 1.0
assert BenchmarkRunner._compute_gap(ub=None, lb=1.0) == 1.0
assert BenchmarkRunner._compute_gap(ub=None, lb=None) == 1.0</code></pre>
benchmark.write_csv(&#34;/tmp/benchmark.csv&#34;)
assert os.path.isfile(&#34;/tmp/benchmark.csv&#34;)</code></pre>
</details>
</section>
<section>
@ -109,33 +95,10 @@ def test_gap():
benchmark = BenchmarkRunner(test_solvers)
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
assert benchmark.raw_results().values.shape == (12, 14)
assert benchmark.results.values.shape == (12, 14)
benchmark.save_results(&#34;/tmp/benchmark.csv&#34;)
assert os.path.isfile(&#34;/tmp/benchmark.csv&#34;)
benchmark = BenchmarkRunner(test_solvers)
benchmark.load_results(&#34;/tmp/benchmark.csv&#34;)
assert benchmark.raw_results().values.shape == (12, 14)</code></pre>
</details>
</dd>
<dt id="miplearn.tests.test_benchmark.test_gap"><code class="name flex">
<span>def <span class="ident">test_gap</span></span>(<span>)</span>
</code></dt>
<dd>
<section class="desc"></section>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def test_gap():
assert BenchmarkRunner._compute_gap(ub=0.0, lb=0.0) == 0.0
assert BenchmarkRunner._compute_gap(ub=1.0, lb=0.5) == 0.5
assert BenchmarkRunner._compute_gap(ub=1.0, lb=1.0) == 0.0
assert BenchmarkRunner._compute_gap(ub=1.0, lb=-1.0) == 1.0
assert BenchmarkRunner._compute_gap(ub=1.0, lb=None) == 1.0
assert BenchmarkRunner._compute_gap(ub=None, lb=1.0) == 1.0
assert BenchmarkRunner._compute_gap(ub=None, lb=None) == 1.0</code></pre>
benchmark.write_csv(&#34;/tmp/benchmark.csv&#34;)
assert os.path.isfile(&#34;/tmp/benchmark.csv&#34;)</code></pre>
</details>
</dd>
</dl>
@ -157,14 +120,13 @@ def test_gap():
<li><h3><a href="#header-functions">Functions</a></h3>
<ul class="">
<li><code><a title="miplearn.tests.test_benchmark.test_benchmark" href="#miplearn.tests.test_benchmark.test_benchmark">test_benchmark</a></code></li>
<li><code><a title="miplearn.tests.test_benchmark.test_gap" href="#miplearn.tests.test_benchmark.test_gap">test_gap</a></code></li>
</ul>
</li>
</ul>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.tests.test_extractors API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -193,7 +193,7 @@ def test_variable_features_extractor():
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -3,7 +3,7 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.7.0" />
<meta name="generator" content="pdoc 0.7.5" />
<title>miplearn.types API documentation</title>
<meta name="description" content="" />
<link href='https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css' rel='stylesheet'>
@ -73,6 +73,25 @@ MIPSolveStats = TypedDict(
},
)
LearningSolveStats = TypedDict(
&#34;LearningSolveStats&#34;,
{
&#34;Gap&#34;: Optional[float],
&#34;Instance&#34;: Union[str, int],
&#34;LP value&#34;: Optional[float],
&#34;Log&#34;: str,
&#34;Lower bound&#34;: Optional[float],
&#34;Mode&#34;: str,
&#34;Nodes&#34;: Optional[int],
&#34;Sense&#34;: str,
&#34;Solver&#34;: str,
&#34;Upper bound&#34;: Optional[float],
&#34;Wallclock time&#34;: float,
&#34;Warm start value&#34;: Optional[float],
},
total=False,
)
IterationCallback = Callable[[], bool]
LazyCallback = Callable[[Any, Any], None]
@ -97,7 +116,6 @@ class Constraint:
<dl>
<dt id="miplearn.types.Constraint"><code class="flex name class">
<span>class <span class="ident">Constraint</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"></section>
@ -130,6 +148,27 @@ dict(one=1, two=2)</p></section>
<li>builtins.dict</li>
</ul>
</dd>
<dt id="miplearn.types.LearningSolveStats"><code class="flex name class">
<span>class <span class="ident">LearningSolveStats</span></span>
<span>(</span><span>*args, **kwargs)</span>
</code></dt>
<dd>
<section class="desc"><p>dict() -&gt; new empty dictionary
dict(mapping) -&gt; new dictionary initialized from a mapping object's
(key, value) pairs
dict(iterable) -&gt; new dictionary initialized as if via:
d = {}
for k, v in iterable:
d[k] = v
dict(**kwargs) -&gt; new dictionary initialized with the name=value pairs
in the keyword argument list.
For example:
dict(one=1, two=2)</p></section>
<h3>Ancestors</h3>
<ul class="hlist">
<li>builtins.dict</li>
</ul>
</dd>
<dt id="miplearn.types.MIPSolveStats"><code class="flex name class">
<span>class <span class="ident">MIPSolveStats</span></span>
<span>(</span><span>*args, **kwargs)</span>
@ -195,6 +234,9 @@ dict(one=1, two=2)</p></section>
<h4><code><a title="miplearn.types.LPSolveStats" href="#miplearn.types.LPSolveStats">LPSolveStats</a></code></h4>
</li>
<li>
<h4><code><a title="miplearn.types.LearningSolveStats" href="#miplearn.types.LearningSolveStats">LearningSolveStats</a></code></h4>
</li>
<li>
<h4><code><a title="miplearn.types.MIPSolveStats" href="#miplearn.types.MIPSolveStats">MIPSolveStats</a></code></h4>
</li>
<li>
@ -206,7 +248,7 @@ dict(one=1, two=2)</p></section>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.0</a>.</p>
<p>Generated by <a href="https://pdoc3.github.io/pdoc"><cite>pdoc</cite> 0.7.5</a>.</p>
</footer>
<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad()</script>

@ -1,307 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="shortcut icon" href="../img/favicon.ico">
<title>Benchmark - MIPLearn</title>
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.12.0/css/all.css">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.12.0/css/v4-shims.css">
<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/hack-font@3.3.0/build/web/hack.min.css">
<link href='//rsms.me/inter/inter.css' rel='stylesheet' type='text/css'>
<link href='//fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,700italic,400,300,600,700&subset=latin-ext,latin' rel='stylesheet' type='text/css'>
<link href="../css/bootstrap-custom.min.css" rel="stylesheet">
<link href="../css/base.min.css" rel="stylesheet">
<link href="../css/cinder.min.css" rel="stylesheet">
<link rel="stylesheet" href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@9.18.0/build/styles/github.min.css">
<link href="../css/custom.css" rel="stylesheet">
<!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!--[if lt IE 9]>
<script src="https://cdn.jsdelivr.net/npm/html5shiv@3.7.3/dist/html5shiv.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/respond.js@1.4.2/dest/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="container">
<!-- Collapsed navigation -->
<div class="navbar-header">
<!-- Expander button -->
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<!-- Main title -->
<a class="navbar-brand" href="..">MIPLearn</a>
</div>
<!-- Expanded navigation -->
<div class="navbar-collapse collapse">
<!-- Main navigation -->
<ul class="nav navbar-nav">
<li >
<a href="..">Home</a>
</li>
<li >
<a href="../usage/">Usage</a>
</li>
<li class="active">
<a href="./">Benchmark</a>
</li>
<li >
<a href="../problems/">Problems</a>
</li>
<li >
<a href="../customization/">Customization</a>
</li>
<li >
<a href="../about/">About</a>
</li>
<li >
<a href="../api/miplearn/index.html">API</a>
</li>
</ul>
<ul class="nav navbar-nav navbar-right">
<li>
<a href="#" data-toggle="modal" data-target="#mkdocs_search_modal">
<i class="fas fa-search"></i> Search
</a>
</li>
<li >
<a rel="prev" href="../usage/">
<i class="fas fa-arrow-left"></i> Previous
</a>
</li>
<li >
<a rel="next" href="../problems/">
Next <i class="fas fa-arrow-right"></i>
</a>
</li>
<li>
<a href="https://github.com/ANL-CEEESA/MIPLearn/edit/dev/docs/benchmark.md"><i class="fab fa-github"></i> Edit on GitHub</a>
</li>
</ul>
</div>
</div>
</div>
<div class="container">
<div class="col-md-3"><div class="bs-sidebar hidden-print affix well" role="complementary">
<ul class="nav bs-sidenav">
        <li class="first-level active"><a href="#benchmarks-utilities">Benchmark Utilities</a></li>
<li class="second-level"><a href="#using-benchmarkrunner">Using BenchmarkRunner</a></li>
<li class="second-level"><a href="#saving-and-loading-benchmark-results">Saving and loading benchmark results</a></li>
</ul>
</div></div>
<div class="col-md-9" role="main">
<h1 id="benchmarks-utilities">Benchmark Utilities</h1>
<h3 id="using-benchmarkrunner">Using <code>BenchmarkRunner</code></h3>
<p>MIPLearn provides the utility class <code>BenchmarkRunner</code>, which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage:</p>
<pre><code class="language-python">from miplearn import BenchmarkRunner, LearningSolver
# Create train and test instances
train_instances = [...]
test_instances = [...]
# Training phase...
training_solver = LearningSolver(...)
training_solver.parallel_solve(train_instances, n_jobs=10)
# Test phase...
test_solvers = {
&quot;Baseline&quot;: LearningSolver(...), # each solver may have different parameters
&quot;Strategy A&quot;: LearningSolver(...),
&quot;Strategy B&quot;: LearningSolver(...),
&quot;Strategy C&quot;: LearningSolver(...),
}
benchmark = BenchmarkRunner(test_solvers)
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=2)
print(benchmark.raw_results())
</code></pre>
<p>The method <code>fit</code> trains the ML models for each individual solver. The method <code>parallel_solve</code> solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, <code>raw_results</code> produces a table of results (Pandas DataFrame) with the following columns:</p>
<ul>
<li><strong>Solver,</strong> the name of the solver.</li>
<li><strong>Instance,</strong> the sequence number identifying the instance.</li>
<li><strong>Wallclock Time,</strong> the wallclock running time (in seconds) spent by the solver;</li>
<li><strong>Lower Bound,</strong> the best lower bound obtained by the solver;</li>
<li><strong>Upper Bound,</strong> the best upper bound obtained by the solver;</li>
<li><strong>Gap,</strong> the relative MIP integrality gap at the end of the optimization;</li>
<li><strong>Nodes,</strong> the number of explored branch-and-bound nodes.</li>
</ul>
<p>In addition to the above, there is also a "Relative" version of most columns, where the raw number is compared to the solver which provided the best performance. The <em>Relative Wallclock Time</em>, for example, indicates how many times slower this run was when compared to the best time achieved by any solver when processing this instance. For example, if this run took 10 seconds, but the fastest solver took only 5 seconds to solve the same instance, the relative wallclock time would be 2.</p>
<h3 id="saving-and-loading-benchmark-results">Saving and loading benchmark results</h3>
<p>When iteratively exploring new formulations, encoding and solver parameters, it is often desirable to avoid repeating parts of the benchmark suite. For example, if the baseline solver has not been changed, there is no need to evaluate its performance again and again when making small changes to the remaining solvers. <code>BenchmarkRunner</code> provides the methods <code>save_results</code> and <code>load_results</code>, which can be used to avoid this repetition, as the next example shows:</p>
<pre><code class="language-python"># Benchmark baseline solvers and save results to a file.
benchmark = BenchmarkRunner(baseline_solvers)
benchmark.parallel_solve(test_instances)
benchmark.save_results(&quot;baseline_results.csv&quot;)
# Benchmark remaining solvers, loading baseline results from file.
benchmark = BenchmarkRunner(alternative_solvers)
benchmark.load_results(&quot;baseline_results.csv&quot;)
benchmark.fit(training_instances)
benchmark.parallel_solve(test_instances)
</code></pre></div>
</div>
<footer class="col-md-12 text-center">
<hr>
<p>
<small>Copyright © 2020, UChicago Argonne, LLC. All Rights Reserved.</small><br>
<small>Documentation built with <a href="http://www.mkdocs.org/">MkDocs</a>.</small>
</p>
</footer>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js"></script>
<script src="../js/bootstrap-3.0.3.min.js"></script>
<script src="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@9.18.0/build/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad();</script>
<script>var base_url = ".."</script>
<script src="../js/base.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script src="../js/mathjax.js"></script>
<script src="../search/main.js"></script>
<div class="modal" id="mkdocs_search_modal" tabindex="-1" role="dialog" aria-labelledby="searchModalLabel" aria-hidden="true">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal">
<span aria-hidden="true">&times;</span>
<span class="sr-only">Close</span>
</button>
<h4 class="modal-title" id="searchModalLabel">Search</h4>
</div>
<div class="modal-body">
<p>
From here you can search these documents. Enter
your search terms below.
</p>
<form>
<div class="form-group">
<input type="text" class="form-control" placeholder="Search..." id="mkdocs-search-query" title="Type search term here">
</div>
</form>
<div id="mkdocs-search-results"></div>
</div>
<div class="modal-footer">
</div>
</div>
</div>
</div><div class="modal" id="mkdocs_keyboard_modal" tabindex="-1" role="dialog" aria-labelledby="keyboardModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title" id="keyboardModalLabel">Keyboard Shortcuts</h4>
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">&times;</span><span class="sr-only">Close</span></button>
</div>
<div class="modal-body">
<table class="table">
<thead>
<tr>
<th style="width: 20%;">Keys</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td class="help shortcut"><kbd>?</kbd></td>
<td>Open this help</td>
</tr>
<tr>
<td class="next shortcut"><kbd>n</kbd></td>
<td>Next page</td>
</tr>
<tr>
<td class="prev shortcut"><kbd>p</kbd></td>
<td>Previous page</td>
</tr>
<tr>
<td class="search shortcut"><kbd>s</kbd></td>
<td>Search</td>
</tr>
</tbody>
</table>
</div>
<div class="modal-footer">
</div>
</div>
</div>
</div>
</body>
</html>

@ -82,12 +82,6 @@
<li >
<a href="../benchmark/">Benchmark</a>
</li>
<li >
<a href="../problems/">Problems</a>
</li>

@ -82,12 +82,6 @@
<li >
<a href="benchmark/">Benchmark</a>
</li>
<li >
<a href="problems/">Problems</a>
</li>
@ -176,8 +170,7 @@ Unlike conventional MIP solvers, MIPLearn can take full advantage of very specif
<h3 id="documentation">Documentation</h3>
<ul>
<li><a href="usage/">Installation and typical usage</a></li>
<li><a href="benchmark/">Benchmark utilities</a></li>
<li><a href="problems/">Benchmark problems, challenges and results</a></li>
<li><a href="problems/">Benchmark problems and results</a></li>
<li><a href="customization/">Customizing the solver</a></li>
<li><a href="about/">License, authors, references and acknowledgments</a></li>
</ul>
@ -293,5 +286,5 @@ Unlike conventional MIP solvers, MIPLearn can take full advantage of very specif
<!--
MkDocs version : 1.1.2
Build Date UTC : 2021-01-22 02:31:46.190084+00:00
Build Date UTC : 2021-01-22 13:24:54.236702+00:00
-->

@ -82,12 +82,6 @@
<li >
<a href="../benchmark/">Benchmark</a>
</li>
<li class="active">
<a href="./">Problems</a>
</li>
@ -120,7 +114,7 @@
</a>
</li>
<li >
<a rel="prev" href="../benchmark/">
<a rel="prev" href="../usage/">
<i class="fas fa-arrow-left"></i> Previous
</a>
</li>

File diff suppressed because one or more lines are too long

@ -19,9 +19,5 @@
<loc>None</loc>
<lastmod>2021-01-22</lastmod>
<changefreq>daily</changefreq>
</url><url>
<loc>None</loc>
<lastmod>2021-01-22</lastmod>
<changefreq>daily</changefreq>
</url>
</urlset>

Binary file not shown.

@ -82,12 +82,6 @@
<li >
<a href="../benchmark/">Benchmark</a>
</li>
<li >
<a href="../problems/">Problems</a>
</li>
@ -125,7 +119,7 @@
</a>
</li>
<li >
<a rel="next" href="../benchmark/">
<a rel="next" href="../problems/">
Next <i class="fas fa-arrow-right"></i>
</a>
</li>
@ -160,7 +154,9 @@
<li class="third-level"><a href="#61-saving-and-loading-solver-state">6.1 Saving and loading solver state</a></li>
<li class="third-level"><a href="#62-solving-instances-in-parallel">6.2 Solving instances in parallel</a></li>
<li class="third-level"><a href="#63-solving-instances-from-the-disk">6.3 Solving instances from the disk</a></li>
<li class="second-level"><a href="#7-current-limitations">7. Current Limitations</a></li>
<li class="second-level"><a href="#7-running-benchmarks">7. Running benchmarks</a></li>
<li class="second-level"><a href="#8-current-limitations">8. Current Limitations</a></li>
</ul>
</div></div>
@ -168,9 +164,9 @@
<h1 id="usage">Usage</h1>
<h2 id="1-installation">1. Installation</h2>
<p>In these docs, we describe the Python/Pyomo version of the package, although a <a href="https://github.com/ANL-CEEESA/MIPLearn.jl">Julia/JuMP version</a> is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX and Gurobi.</p>
<p>In these docs, we describe the Python/Pyomo version of the package, although a <a href="https://github.com/ANL-CEEESA/MIPLearn.jl">Julia/JuMP version</a> is also available. A mixed-integer solver is also required and its Python bindings must be properly installed. Supported solvers are currently CPLEX, Gurobi and XPRESS.</p>
<p>To install MIPLearn, run: </p>
<pre><code class="language-bash">pip3 install miplearn
<pre><code class="language-bash">pip3 install --upgrade miplearn==0.2.*
</code></pre>
<p>After installation, the package <code>miplearn</code> should become available to Python. It can be imported
as follows:</p>
@ -244,7 +240,7 @@ for instance in test_instances:
</div>
<h2 id="5-obtaining-heuristic-solutions">5. Obtaining heuristic solutions</h2>
<p>By default, <code>LearningSolver</code> uses Machine Learning to accelerate the MIP solution process, while maintaining all optimality guarantees provided by the MIP solver. In the default mode of operation, for example, predicted optimal solutions are used only as MIP starts.</p>
<p>For more significant performance benefits, <code>LearningSolver</code> can also be configured to place additional trust in the Machine Learning predictors, by using the <code>mode="heuristic"</code> constructor argument. When operating in this mode, if a ML model is statistically shown (through <em>stratified k-fold cross validation</em>) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see <a href="../about/#references">references</a> and <a href="../benchmark/">benchmark results</a>).</p>
<p>For more significant performance benefits, <code>LearningSolver</code> can also be configured to place additional trust in the Machine Learning predictors, by using the <code>mode="heuristic"</code> constructor argument. When operating in this mode, if a ML model is statistically shown (through <em>stratified k-fold cross validation</em>) to have exceptionally high accuracy, the solver may decide to restrict the search space based on its predictions. The parts of the solution which the ML models cannot predict accurately will still be explored using traditional (branch-and-bound) methods. For particular applications, this mode has been shown to quickly produce optimal or near-optimal solutions (see <a href="../about/#references">references</a> and <a href="../problems/">benchmark results</a>).</p>
<div class="admonition danger">
<p class="admonition-title">Danger</p>
<p>The <code>heuristic</code> mode provides no optimality guarantees, and therefore should only be used if the solver is first trained on a large and representative set of training instances. Training on a small or non-representative set of instances may produce low-quality solutions, or make the solver incorrectly classify new instances as infeasible.</p>
@ -295,11 +291,12 @@ solver.parallel_solve(test_instances)
<h3 id="63-solving-instances-from-the-disk">6.3 Solving instances from the disk</h3>
<p>In all examples above, we have assumed that instances are available as Python objects, stored in memory. When problem instances are very large, or when there is a large number of problem instances, this approach may require an excessive amount of memory. To reduce memory requirements, MIPLearn can also operate on instances that are stored on disk. More precisely, the methods <code>fit</code>, <code>solve</code> and <code>parallel_solve</code> in <code>LearningSolver</code> can operate on filenames (or lists of filenames) instead of instance objects, as the next example illustrates.
Instance files must be pickled instance objects. The method <code>solve</code> loads at most one instance to memory at a time, while <code>parallel_solve</code> loads at most <code>n_jobs</code> instances.</p>
<pre><code class="language-python">from miplearn import LearningSolver
<pre><code class="language-python">import pickle
from miplearn import LearningSolver
# Construct and pickle 600 problem instances
for i in range(600):
instance = CustomInstance([...])
instance = MyProblemInstance([...])
    with open(&#34;instance_%03d.pkl&#34; % i, &#34;wb&#34;) as file:
        pickle.dump(instance, file)
@ -319,21 +316,45 @@ solver.fit(train_instances)
# Solve test instances
solver.parallel_solve(test_instances, n_jobs=4)
</code></pre>
<p>By default, <code>solve</code> and <code>parallel_solve</code> modify files in place. That is, after the instances are loaded from disk and solved, MIPLearn writes them back to the disk, overwriting the original files. To write to an alternative file instead, the argument <code>output</code> may be used. In <code>solve</code>, this argument should be a single filename. In <code>parallel_solve</code>, it should be a list, containing exactly as many filenames as instances. If <code>output</code> is <code>None</code>, the modifications are simply discarded. This can be useful, for example, during benchmarks.</p>
<pre><code class="language-python"># Solve a single instance file and store the output to another file
solver.solve(&quot;knapsack_1.orig.pkl&quot;, output=&quot;knapsack_1.solved.pkl&quot;)
<p>By default, <code>solve</code> and <code>parallel_solve</code> modify files in place. That is, after the instances are loaded from disk and solved, MIPLearn writes them back to the disk, overwriting the original files. To write to an alternative file instead, use the arguments <code>output_filename</code> (in <code>solve</code>) and <code>output_filenames</code> (in <code>parallel_solve</code>). To discard the modifications instead, use <code>discard_outputs=True</code>. This can be useful, for example, during benchmarks.</p>
<pre><code class="language-python"># Solve a single instance file and write the output to another file
solver.solve(&quot;knapsack_1.orig.pkl&quot;, output_filename=&quot;knapsack_1.solved.pkl&quot;)
# Solve a list of instance files
instances = [&quot;knapsack_%03d.orig.pkl&quot; % i for i in range(100)]
output = [&quot;knapsack_%03d.solved.pkl&quot; % i for i in range(100)]
solver.parallel_solve(instances, output=output)
solver.parallel_solve(instances, output_filenames=output)
# Solve instances and discard solutions and training data
solver.parallel_solve(instances, output=None)
solver.parallel_solve(instances, discard_outputs=True)
</code></pre>
<h2 id="7-running-benchmarks">7. Running benchmarks</h2>
<p>MIPLearn provides the utility class <code>BenchmarkRunner</code>, which simplifies the task of comparing the performance of different solvers. The snippet below shows its basic usage:</p>
<pre><code class="language-python">from miplearn import BenchmarkRunner, LearningSolver
# Create train and test instances
train_instances = [...]
test_instances = [...]
# Training phase...
training_solver = LearningSolver(...)
training_solver.parallel_solve(train_instances, n_jobs=10)
# Test phase...
benchmark = BenchmarkRunner({
&quot;Baseline&quot;: LearningSolver(...),
&quot;Strategy A&quot;: LearningSolver(...),
&quot;Strategy B&quot;: LearningSolver(...),
&quot;Strategy C&quot;: LearningSolver(...),
})
benchmark.fit(train_instances)
benchmark.parallel_solve(test_instances, n_jobs=5)
benchmark.write_csv(&quot;results.csv&quot;)
</code></pre>
<h2 id="7-current-limitations">7. Current Limitations</h2>
<p>The method <code>fit</code> trains the ML models for each individual solver. The method <code>parallel_solve</code> solves the test instances in parallel, and collects solver statistics such as running time and optimal value. Finally, <code>write_csv</code> produces a table of results. The columns in the CSV file depend on the components added to the solver.</p>
<h2 id="8-current-limitations">8. Current Limitations</h2>
<ul>
<li>Only binary and continuous decision variables are currently supported. General integer variables are not currently supported by all solver components.</li>
<li>Only binary and continuous decision variables are currently supported. General integer variables are not currently supported by some solver components.</li>
</ul></div>

Loading…
Cancel
Save