Mirror of https://github.com/ANL-CEEESA/MIPLearn.git, synced 2025-12-08 18:38:51 -06:00

Compare commits

39 Commits
| SHA1 |
|---|
| 9f0fa0e500 |
| 485625e07f |
| 146fb6b615 |
| 1d44980a7b |
| 4137378bb8 |
| 427bd1d806 |
| 14e2fe331d |
| 15cdb7e679 |
| 9192bb02eb |
| a4cb46f73e |
| 7fd88b0a3d |
| 1f59ed4065 |
| aa291410d8 |
| ca05429203 |
| 4eeb1c1ab3 |
| bfaae7c005 |
| 596f41c477 |
| 19e1f52b4f |
| 7ed213d4ce |
| daa801b5e9 |
| 2ca2794457 |
| 1c6912cc51 |
| eb914a4bdd |
| a306f0df26 |
| e0b4181579 |
| 332b2b9fca |
| af65069202 |
| dadd2216f1 |
| 5fefb49566 |
| 3775c3f780 |
| e66e6d7660 |
| 8e05a69351 |
| 7ccb7875b9 |
| f085ab538b |
| 7f273ebb70 |
| 26cfab0ebd |
| 52ed34784d |
| 0534d50af3 |
| 8a02e22a35 |
CHANGELOG.md (49 lines changed)

@@ -3,7 +3,22 @@
 All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+and this project adheres to
+[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.4.3] - 2025-05-10
+
+## Changed
+
+- Update dependency: Gurobi 12
+
+## [0.4.2] - 2024-12-10
+
+## Changed
+
+- H5File: Use float64 precision instead of float32
+- LearningSolver: optimize now returns (model, stats) instead of just stats
+- Update dependency: Gurobi 11
 
 ## [0.4.0] - 2024-02-06
 

@@ -15,31 +30,41 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Changed
 
 - LearningSolver.solve no longer generates HDF5 files; use a collector instead.
-- Add `_gurobipy` suffix to all `build_model` functions; implement some `_pyomo` and `_jump` functions.
+- Add `_gurobipy` suffix to all `build_model` functions; implement some `_pyomo`
+  and `_jump` functions.
 
 ## [0.3.0] - 2023-06-08
 
-This is a complete rewrite of the original prototype package, with an entirely new API, focused on performance, scalability and flexibility.
+This is a complete rewrite of the original prototype package, with an entirely
+new API, focused on performance, scalability and flexibility.
 
 ### Added
 
-- Add support for Python/Gurobipy and Julia/JuMP, in addition to the existing Python/Pyomo interface.
-- Add six new random instance generators (bin packing, capacitated p-median, set cover, set packing, unit commitment, vertex cover), in addition to the three existing generators (multiknapsack, stable set, tsp).
-- Collect some additional raw training data (e.g. basis status, reduced costs, etc)
-- Add new primal solution ML strategies (memorizing, independent vars and joint vars)
-- Add new primal solution actions (set warm start, fix variables, enforce proximity)
+- Add support for Python/Gurobipy and Julia/JuMP, in addition to the existing
+  Python/Pyomo interface.
+- Add six new random instance generators (bin packing, capacitated p-median, set
+  cover, set packing, unit commitment, vertex cover), in addition to the three
+  existing generators (multiknapsack, stable set, tsp).
+- Collect some additional raw training data (e.g. basis status, reduced costs,
+  etc)
+- Add new primal solution ML strategies (memorizing, independent vars and joint
+  vars)
+- Add new primal solution actions (set warm start, fix variables, enforce
+  proximity)
 - Add runnable tutorials and user guides to the documentation.
 
 ### Changed
 
-- To support large-scale problems and datasets, switch from an in-memory architecture to a file-based architecture, using HDF5 files.
-- To accelerate development cycle, split training data collection from feature extraction.
+- To support large-scale problems and datasets, switch from an in-memory
+  architecture to a file-based architecture, using HDF5 files.
+- To accelerate development cycle, split training data collection from feature
+  extraction.
 
 ### Removed
 
 - Temporarily remove ML strategies for lazy constraints
-- Remove benchmarks from documentation. These will be published in a separate paper.
+- Remove benchmarks from documentation. These will be published in a separate
+  paper.
 
 ## [0.1.0] - 2020-11-23
 
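The 0.4.2 entry above changes the return value of `LearningSolver.optimize`. A minimal sketch of what calling code looks like before and after the change; the import path and the `train_data`/`test_data`/`build_uc_model` names follow the tutorial notebooks further down in this diff, and the exact signatures should be treated as assumptions rather than a verified API reference:

```python
# Sketch only; assumes MIPLearn 0.4.2+ and the names used in the tutorials.
from miplearn.solvers.learning import LearningSolver

solver = LearningSolver(components=[])
solver.fit(train_data)  # train_data: list of training instance files (assumed)

# 0.4.2 and later: optimize returns a (model, stats) pair.
model, stats = solver.optimize(test_data[0], build_uc_model)

# Earlier releases returned stats alone:
# stats = solver.optimize(test_data[0], build_uc_model)
```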
Makefile (6 lines changed)

@@ -7,6 +7,10 @@ VERSION := 0.4
 
 all: docs test
 
+conda-create:
+	conda env remove -n miplearn
+	conda create -n miplearn python=3.12
+
 clean:
 	rm -rf build/* dist/*
 

@@ -43,6 +47,6 @@ test:
 	# rm -rf .mypy_cache
 	$(MYPY) -p miplearn
 	$(MYPY) -p tests
-	$(PYTEST) $(PYTEST_ARGS)
+	$(PYTEST) $(PYTEST_ARGS) .
 
 .PHONY: test test-watch docs install dist
README.md

@@ -14,7 +14,7 @@
 </a>
 </p>
 
-**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Linear Programming (MIP) and Machine Learning (ML). MIPLearn uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers such as CPLEX, Gurobi or XPRESS.
+**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Programming (MIP) and Machine Learning (ML). MIPLearn uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers such as CPLEX, Gurobi or XPRESS.
 
 Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions to discrete optimization problems, but it can also prove the optimality and feasibility of these solutions. Unlike conventional MIP solvers, MIPLearn can take full advantage of very specific observations that happen to be true in a particular family of instances (such as the observation that a particular constraint is typically redundant, or that a particular variable typically assumes a certain value). For certain classes of problems, this approach may provide significant performance benefits.
 
docs/_static/custom.css (vendored, 10 lines changed)

@@ -118,3 +118,13 @@ table tr:last-child {
   border-bottom: 0;
 }
 
+@media (min-width: 960px) {
+  .bd-page-width {
+    max-width: 100rem;
+  }
+}
+
+.bd-sidebar-primary .sidebar-primary-items__end {
+  margin-bottom: 0;
+  margin-top: 0;
+}
@@ -55,3 +55,9 @@ miplearn.problems.vertexcover
 .. automodule:: miplearn.problems.vertexcover
    :members:
 
+miplearn.problems.maxcut
+-----------------------------
+
+.. automodule:: miplearn.problems.maxcut
+   :members:
+
@@ -14,7 +14,7 @@
 "\n",
 "## HDF5 Format\n",
 "\n",
-"MIPLearn stores all training data in [HDF5](HDF5) (Hierarchical Data Format, Version 5) files. The HDF format was originally developed by the [National Center for Supercomputing Applications][NCSA] (NCSA) for storing and organizing large amounts of data, and supports a variety of data types, including integers, floating-point numbers, strings, and arrays. Compared to other formats, such as CSV, JSON or SQLite, the HDF5 format provides several advantages for MIPLearn, including:\n",
+"MIPLearn stores all training data in [HDF5][HDF5] (Hierarchical Data Format, Version 5) files. The HDF format was originally developed by the [National Center for Supercomputing Applications][NCSA] (NCSA) for storing and organizing large amounts of data, and supports a variety of data types, including integers, floating-point numbers, strings, and arrays. Compared to other formats, such as CSV, JSON or SQLite, the HDF5 format provides several advantages for MIPLearn, including:\n",
 "\n",
 "- *Storage of multiple scalars, vectors and matrices in a single file* --- This allows MIPLearn to store all training data related to a given problem instance in a single file, which makes training data easier to store, organize and transfer.\n",
 "- *High-performance partial I/O* --- Partial I/O allows MIPLearn to read a single element from the training data (e.g. value of the optimal solution) without loading the entire file to memory or reading it from beginning to end, which dramatically improves performance and reduces memory requirements. This is especially important when processing a large number of training data files.\n",

@@ -58,21 +58,21 @@
 "x1 = 1\n",
 "x2 = hello world\n",
 "x3 = [1 2 3]\n",
-"x4 = [[0.37454012 0.9507143 0.7319939 ]\n",
-" [0.5986585 0.15601864 0.15599452]\n",
-" [0.05808361 0.8661761 0.601115 ]]\n",
-"x5 = (3, 2)\t0.6803075671195984\n",
-" (2, 3)\t0.4504992663860321\n",
-" (0, 4)\t0.013264961540699005\n",
-" (2, 0)\t0.9422017335891724\n",
-" (2, 4)\t0.5632882118225098\n",
-" (1, 2)\t0.38541650772094727\n",
-" (1, 1)\t0.015966251492500305\n",
-" (0, 3)\t0.2308938205242157\n",
-" (4, 4)\t0.24102546274662018\n",
-" (3, 1)\t0.6832635402679443\n",
-" (1, 3)\t0.6099966764450073\n",
-" (3, 0)\t0.83319491147995\n"
+"x4 = [[0.37454012 0.95071431 0.73199394]\n",
+" [0.59865848 0.15601864 0.15599452]\n",
+" [0.05808361 0.86617615 0.60111501]]\n",
+"x5 = (3, 2)\t0.6803075385877797\n",
+" (2, 3)\t0.450499251969543\n",
+" (0, 4)\t0.013264961159866528\n",
+" (2, 0)\t0.9422017556848528\n",
+" (2, 4)\t0.5632882178455393\n",
+" (1, 2)\t0.3854165025399161\n",
+" (1, 1)\t0.015966252220214194\n",
+" (0, 3)\t0.230893825622149\n",
+" (4, 4)\t0.24102546602601171\n",
+" (3, 1)\t0.6832635188254582\n",
+" (1, 3)\t0.6099966577826209\n",
+" (3, 0)\t0.8331949117361643\n"
 ]
 }
 ],

@@ -108,12 +108,6 @@
 " print(\"x5 =\", h5.get_sparse(\"x5\"))"
 ]
 },
-{
-"cell_type": "markdown",
-"id": "50441907",
-"metadata": {},
-"source": []
-},
 {
 "cell_type": "markdown",
 "id": "d0000c8d",
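A minimal sketch of the partial-I/O advantage described in the notebook above, written against plain `h5py` rather than MIPLearn's own `H5File` wrapper shown in the notebook; the file name and dataset key below are hypothetical placeholders:

```python
# Sketch only: read one scalar from a large HDF5 training file without
# loading the rest of the file into memory. The file name and the
# "mip_obj_value" key are hypothetical, not MIPLearn's documented schema.
import h5py

with h5py.File("instance00000.h5", "r") as h5:
    obj = h5["mip_obj_value"][()]  # partial I/O: only this dataset is read
    print(obj)
```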
@@ -69,22 +69,22 @@
 " -709. -605. -543. -321.\n",
 " -674. -571. -341. ]\n",
 "variable features (10, 4) \n",
-" [[-1.53124309e+03 -3.50000000e+02 0.00000000e+00 9.43468018e+01]\n",
-" [-1.53124309e+03 -6.92000000e+02 2.51703322e-01 0.00000000e+00]\n",
-" [-1.53124309e+03 -4.54000000e+02 0.00000000e+00 8.25504150e+01]\n",
-" [-1.53124309e+03 -7.09000000e+02 1.11373022e-01 0.00000000e+00]\n",
-" [-1.53124309e+03 -6.05000000e+02 1.00000000e+00 -1.26055283e+02]\n",
-" [-1.53124309e+03 -5.43000000e+02 0.00000000e+00 1.68693771e+02]\n",
+" [[-1.53124309e+03 -3.50000000e+02 0.00000000e+00 9.43467993e+01]\n",
+" [-1.53124309e+03 -6.92000000e+02 2.51703329e-01 0.00000000e+00]\n",
+" [-1.53124309e+03 -4.54000000e+02 0.00000000e+00 8.25504181e+01]\n",
+" [-1.53124309e+03 -7.09000000e+02 1.11373019e-01 0.00000000e+00]\n",
+" [-1.53124309e+03 -6.05000000e+02 1.00000000e+00 -1.26055279e+02]\n",
+" [-1.53124309e+03 -5.43000000e+02 0.00000000e+00 1.68693775e+02]\n",
 " [-1.53124309e+03 -3.21000000e+02 1.07488781e-01 0.00000000e+00]\n",
-" [-1.53124309e+03 -6.74000000e+02 8.82293701e-01 0.00000000e+00]\n",
+" [-1.53124309e+03 -6.74000000e+02 8.82293687e-01 0.00000000e+00]\n",
 " [-1.53124309e+03 -5.71000000e+02 0.00000000e+00 1.41129074e+02]\n",
-" [-1.53124309e+03 -3.41000000e+02 1.28830120e-01 0.00000000e+00]]\n",
+" [-1.53124309e+03 -3.41000000e+02 1.28830116e-01 0.00000000e+00]]\n",
 "constraint features (5, 3) \n",
-" [[ 1.3100000e+03 -1.5978307e-01 0.0000000e+00]\n",
-" [ 9.8800000e+02 -3.2881632e-01 0.0000000e+00]\n",
-" [ 1.0040000e+03 -4.0601316e-01 0.0000000e+00]\n",
-" [ 1.2690000e+03 -1.3659772e-01 0.0000000e+00]\n",
-" [ 1.0070000e+03 -2.8800571e-01 0.0000000e+00]]\n"
+" [[ 1.31000000e+03 -1.59783068e-01 0.00000000e+00]\n",
+" [ 9.88000000e+02 -3.28816327e-01 0.00000000e+00]\n",
+" [ 1.00400000e+03 -4.06013164e-01 0.00000000e+00]\n",
+" [ 1.26900000e+03 -1.36597720e-01 0.00000000e+00]\n",
+" [ 1.00700000e+03 -2.88005696e-01 0.00000000e+00]]\n"
 ]
 }
 ],
@@ -15,7 +15,7 @@
 "\n",
 "Before presenting the primal components themselves, we briefly discuss the three ways a solution may be provided to the solver. Each approach has benefits and limitations, which we also discuss in this section. All primal components can be configured to use any of the following approaches.\n",
 "\n",
-"The first approach is to provide the solution to the solver as a **warm start**. This is implemented by the class [SetWarmStart](SetWarmStart). The main advantage is that this method maintains all optimality and feasibility guarantees of the MIP solver, while still providing significant performance benefits for various classes of problems. If the machine learning model is able to predict multiple solutions, it is also possible to set multiple warm starts. In this case, the solver evaluates each warm start, discards the infeasible ones, then proceeds with the one that has the best objective value. The main disadvantage of this approach, compared to the next two, is that it provides relatively modest speedups for most problem classes, and no speedup at all for many others, even when the machine learning predictions are 100% accurate.\n",
+"The first approach is to provide the solution to the solver as a **warm start**. This is implemented by the class [SetWarmStart][SetWarmStart]. The main advantage is that this method maintains all optimality and feasibility guarantees of the MIP solver, while still providing significant performance benefits for various classes of problems. If the machine learning model is able to predict multiple solutions, it is also possible to set multiple warm starts. In this case, the solver evaluates each warm start, discards the infeasible ones, then proceeds with the one that has the best objective value. The main disadvantage of this approach, compared to the next two, is that it provides relatively modest speedups for most problem classes, and no speedup at all for many others, even when the machine learning predictions are 100% accurate.\n",
 "\n",
 "[SetWarmStart]: ../../api/components/#miplearn.components.primal.actions.SetWarmStart\n",
 "\n",
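The hunk above concerns the warm-start approach implemented by `SetWarmStart`. A minimal sketch of the underlying mechanism, using gurobipy directly rather than MIPLearn's component; the model and the predicted values are made up for illustration:

```python
# Sketch of a warm start in plain gurobipy; not MIPLearn's SetWarmStart code.
import gurobipy as gp
from gurobipy import GRB

model = gp.Model()
x = model.addVars(3, vtype=GRB.BINARY, name="x")
model.setObjective(x[0] + 2 * x[1] + 3 * x[2], GRB.MAXIMIZE)
model.addConstr(x[0] + x[1] + x[2] <= 2)

predicted = {0: 1.0, 1: 0.0, 2: 1.0}  # hypothetical ML prediction
for i, value in predicted.items():
    x[i].Start = value  # Gurobi evaluates the start; infeasible starts are discarded

model.optimize()
```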
(File diff suppressed because it is too large.)
@@ -70,8 +70,8 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Restricted license - for non-production use only - expires 2024-10-28\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Restricted license - for non-production use only - expires 2026-11-23\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
 "Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",

@@ -90,17 +90,21 @@
 " 0 6.3600000e+02 1.700000e+01 0.000000e+00 0s\n",
 " 15 2.7610000e+03 0.000000e+00 0.000000e+00 0s\n",
 "\n",
-"Solved in 15 iterations and 0.00 seconds (0.00 work units)\n",
+"Solved in 15 iterations and 0.01 seconds (0.00 work units)\n",
 "Optimal objective 2.761000000e+03\n",
 "\n",
 "User-callback calls 56, time in user-callback 0.00 sec\n",
 "Set parameter PreCrush to value 1\n",
 "Set parameter LazyConstraints to value 1\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
 "Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
 "\n",
+"Non-default parameters:\n",
+"PreCrush 1\n",
+"LazyConstraints 1\n",
+"\n",
 "Optimize a model with 10 rows, 45 columns and 90 nonzeros\n",
 "Model fingerprint: 0x74ca3d0a\n",
 "Variable types: 0 continuous, 45 integer (45 binary)\n",

@@ -123,12 +127,11 @@
 " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
 "\n",
 " 0 0 2761.00000 0 - 2796.00000 2761.00000 1.25% - 0s\n",
-" 0 0 cutoff 0 2796.00000 2796.00000 0.00% - 0s\n",
 "\n",
 "Cutting planes:\n",
 " Lazy constraints: 3\n",
 "\n",
-"Explored 1 nodes (16 simplex iterations) in 0.01 seconds (0.00 work units)\n",
+"Explored 1 nodes (14 simplex iterations) in 0.01 seconds (0.00 work units)\n",
 "Thread count was 20 (of 20 available processors)\n",
 "\n",
 "Solution count 1: 2796 \n",

@@ -136,18 +139,8 @@
 "Optimal solution found (tolerance 1.00e-04)\n",
 "Best objective 2.796000000000e+03, best bound 2.796000000000e+03, gap 0.0000%\n",
 "\n",
-"User-callback calls 110, time in user-callback 0.00 sec\n"
+"User-callback calls 114, time in user-callback 0.00 sec\n"
 ]
-},
-{
-"data": {
-"text/plain": [
-"{'WS: Count': 1, 'WS: Number of variables set': 41.0}"
-]
-},
-"execution_count": 1,
-"metadata": {},
-"output_type": "execute_result"
 }
 ],
 "source": [

@@ -215,7 +208,7 @@
 "solver.fit(train_data)\n",
 "\n",
 "# Solve a test instance\n",
-"solver.optimize(test_data[0], build_tsp_model_gurobipy)"
+"solver.optimize(test_data[0], build_tsp_model_gurobipy);"
 ]
 },
 {
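The logs above show lazy subtour elimination constraints being added during the TSP solve. A textbook sketch of that callback pattern in gurobipy follows; this is the standard DFJ-cut idiom, not MIPLearn's internal implementation, and the `model._x`/`model._n` attributes are assumed conventions:

```python
# Standard lazy subtour-elimination callback for the TSP; sketch only.
import gurobipy as gp
from gurobipy import GRB
import networkx as nx

def subtour_elimination(model, where):
    if where != GRB.Callback.MIPSOL:
        return
    vals = model.cbGetSolution(model._x)  # model._x: dict of binary edge vars
    chosen = [edge for edge, v in vals.items() if v > 0.5]
    graph = nx.Graph(chosen)
    for component in nx.connected_components(graph):
        if len(component) < model._n:  # proper subtour found: cut it off
            edges = [(i, j) for (i, j) in model._x
                     if i in component and j in component]
            model.cbLazy(gp.quicksum(model._x[e] for e in edges)
                         <= len(component) - 1)

# Usage sketch (matches the "LazyConstraints 1" parameter in the log):
# model.Params.LazyConstraints = 1
# model.optimize(subtour_elimination)
```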
@@ -1,6 +1,6 @@
 MIPLearn
 ========
-**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Linear Programming (MIP) and Machine Learning (ML). MIPLearn uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers such as CPLEX, Gurobi or XPRESS.
+**MIPLearn** is an extensible framework for solving discrete optimization problems using a combination of Mixed-Integer Programming (MIP) and Machine Learning (ML). MIPLearn uses ML methods to automatically identify patterns in previously solved instances of the problem, then uses these patterns to accelerate the performance of conventional state-of-the-art MIP solvers such as CPLEX, Gurobi or XPRESS.
 
 Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions to discrete optimization problems, but it can also prove the optimality and feasibility of these solutions. Unlike conventional MIP solvers, MIPLearn can take full advantage of very specific observations that happen to be true in a particular family of instances (such as the observation that a particular constraint is typically redundant, or that a particular variable typically assumes a certain value). For certain classes of problems, this approach may provide significant performance benefits.
 

@@ -61,7 +61,7 @@ Citing MIPLearn
 
 If you use MIPLearn in your research (either the solver or the included problem generators), we kindly request that you cite the package as follows:
 
-* **Alinson S. Xavier, Feng Qiu, Xiaoyi Gu, Berkay Becu, Santanu S. Dey.** *MIPLearn: An Extensible Framework for Learning-Enhanced Optimization (Version 0.3)*. Zenodo (2023). DOI: https://doi.org/10.5281/zenodo.4287567
+* **Alinson S. Xavier, Feng Qiu, Xiaoyi Gu, Berkay Becu, Santanu S. Dey.** *MIPLearn: An Extensible Framework for Learning-Enhanced Optimization (Version 0.4)*. Zenodo (2024). DOI: https://doi.org/10.5281/zenodo.4287567
 
 If you use MIPLearn in the field of power systems optimization, we kindly request that you cite the reference below, in which the main techniques implemented in MIPLearn were first developed:
 
@@ -286,12 +286,16 @@
 "output_type": "stream",
 "text": [
 "Set parameter Threads to value 1\n",
-"Restricted license - for non-production use only - expires 2024-10-28\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Read parameters from file gurobi.env\n",
+"Restricted license - for non-production use only - expires 2026-11-23\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
 "Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
 "\n",
+"Non-default parameters:\n",
+"Threads 1\n",
+"\n",
 "Optimize a model with 50 rows, 1225 columns and 2450 nonzeros\n",
 "Model fingerprint: 0x04d7bec1\n",
 "Coefficient statistics:\n",

@@ -309,7 +313,7 @@
 "Solved in 66 iterations and 0.01 seconds (0.00 work units)\n",
 "Optimal objective 5.588000000e+03\n",
 "\n",
-"User-callback calls 107, time in user-callback 0.00 sec\n"
+"User-callback calls 110, time in user-callback 0.00 sec\n"
 ]
 },
 {
@@ -327,11 +331,16 @@
 "Enforcing 19 subtour elimination constraints\n",
 "Set parameter PreCrush to value 1\n",
 "Set parameter LazyConstraints to value 1\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
 "Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
 "\n",
+"Non-default parameters:\n",
+"PreCrush 1\n",
+"Threads 1\n",
+"LazyConstraints 1\n",
+"\n",
 "Optimize a model with 69 rows, 1225 columns and 6091 nonzeros\n",
 "Model fingerprint: 0x09bd34d6\n",
 "Variable types: 0 continuous, 1225 integer (1225 binary)\n",
@@ -356,23 +365,29 @@
 "Enforcing 3 subtour elimination constraints\n",
 " 0 0 6165.50000 0 6 6390.00000 6165.50000 3.51% - 0s\n",
 " 0 0 6198.50000 0 16 6390.00000 6198.50000 3.00% - 0s\n",
+" 0 0 6210.50000 0 6 6390.00000 6210.50000 2.81% - 0s\n",
+" 0 0 6212.60000 0 31 6390.00000 6212.60000 2.78% - 0s\n",
+"H 0 0 6241.0000000 6212.60000 0.46% - 0s\n",
 "* 0 0 0 6219.0000000 6219.00000 0.00% - 0s\n",
 "\n",
 "Cutting planes:\n",
-" Gomory: 11\n",
+" Gomory: 6\n",
+" Clique: 1\n",
 " MIR: 1\n",
+" StrongCG: 1\n",
 " Zero half: 4\n",
+" RLT: 1\n",
 " Lazy constraints: 3\n",
 "\n",
-"Explored 1 nodes (222 simplex iterations) in 0.03 seconds (0.02 work units)\n",
+"Explored 1 nodes (219 simplex iterations) in 0.04 seconds (0.03 work units)\n",
 "Thread count was 1 (of 20 available processors)\n",
 "\n",
-"Solution count 3: 6219 6390 29853 \n",
+"Solution count 4: 6219 6241 6390 29853 \n",
 "\n",
 "Optimal solution found (tolerance 1.00e-04)\n",
 "Best objective 6.219000000000e+03, best bound 6.219000000000e+03, gap 0.0000%\n",
 "\n",
-"User-callback calls 141, time in user-callback 0.00 sec\n"
+"User-callback calls 163, time in user-callback 0.00 sec\n"
 ]
 }
 ],
@@ -402,11 +417,14 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
 "Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
 "\n",
+"Non-default parameters:\n",
+"Threads 1\n",
+"\n",
 "Optimize a model with 50 rows, 1225 columns and 2450 nonzeros\n",
 "Model fingerprint: 0x04d7bec1\n",
 "Coefficient statistics:\n",
@@ -424,14 +442,19 @@
 "Solved in 66 iterations and 0.01 seconds (0.00 work units)\n",
 "Optimal objective 5.588000000e+03\n",
 "\n",
-"User-callback calls 107, time in user-callback 0.00 sec\n",
+"User-callback calls 110, time in user-callback 0.00 sec\n",
 "Set parameter PreCrush to value 1\n",
 "Set parameter LazyConstraints to value 1\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
 "Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
 "\n",
+"Non-default parameters:\n",
+"PreCrush 1\n",
+"Threads 1\n",
+"LazyConstraints 1\n",
+"\n",
 "Optimize a model with 50 rows, 1225 columns and 2450 nonzeros\n",
 "Model fingerprint: 0x77a94572\n",
 "Variable types: 0 continuous, 1225 integer (1225 binary)\n",
@@ -452,39 +475,46 @@
 "\n",
 " 0 0 5588.00000 0 12 29695.0000 5588.00000 81.2% - 0s\n",
 "Enforcing 9 subtour elimination constraints\n",
-"Enforcing 11 subtour elimination constraints\n",
-"H 0 0 27241.000000 5588.00000 79.5% - 0s\n",
-" 0 0 5898.00000 0 8 27241.0000 5898.00000 78.3% - 0s\n",
-"Enforcing 4 subtour elimination constraints\n",
+"Enforcing 9 subtour elimination constraints\n",
+"H 0 0 24919.000000 5588.00000 77.6% - 0s\n",
+" 0 0 5847.50000 0 14 24919.0000 5847.50000 76.5% - 0s\n",
+"Enforcing 5 subtour elimination constraints\n",
+"Enforcing 5 subtour elimination constraints\n",
 "Enforcing 3 subtour elimination constraints\n",
-" 0 0 6066.00000 0 - 27241.0000 6066.00000 77.7% - 0s\n",
 "Enforcing 2 subtour elimination constraints\n",
-" 0 0 6128.00000 0 - 27241.0000 6128.00000 77.5% - 0s\n",
-" 0 0 6139.00000 0 6 27241.0000 6139.00000 77.5% - 0s\n",
-"H 0 0 6368.0000000 6139.00000 3.60% - 0s\n",
-" 0 0 6154.75000 0 15 6368.00000 6154.75000 3.35% - 0s\n",
-"Enforcing 2 subtour elimination constraints\n",
-" 0 0 6154.75000 0 6 6368.00000 6154.75000 3.35% - 0s\n",
-" 0 0 6165.75000 0 11 6368.00000 6165.75000 3.18% - 0s\n",
+"H 0 0 7764.0000000 5847.50000 24.7% - 0s\n",
+"H 0 0 6684.0000000 5847.50000 12.5% - 0s\n",
+" 0 0 6013.75000 0 11 6684.00000 6013.75000 10.0% - 0s\n",
+"H 0 0 6340.0000000 6013.75000 5.15% - 0s\n",
 "Enforcing 3 subtour elimination constraints\n",
-" 0 0 6204.00000 0 6 6368.00000 6204.00000 2.58% - 0s\n",
-"* 0 0 0 6219.0000000 6219.00000 0.00% - 0s\n",
+"Enforcing 2 subtour elimination constraints\n",
+" 0 0 6095.00000 0 10 6340.00000 6095.00000 3.86% - 0s\n",
+"Enforcing 3 subtour elimination constraints\n",
+"Enforcing 2 subtour elimination constraints\n",
+" 0 0 6128.00000 0 - 6340.00000 6128.00000 3.34% - 0s\n",
+" 0 0 6139.00000 0 6 6340.00000 6139.00000 3.17% - 0s\n",
+"Enforcing 2 subtour elimination constraints\n",
+" 0 0 6187.25000 0 17 6340.00000 6187.25000 2.41% - 0s\n",
+"Enforcing 2 subtour elimination constraints\n",
+"Enforcing 2 subtour elimination constraints\n",
+" 0 0 6201.00000 0 15 6340.00000 6201.00000 2.19% - 0s\n",
+" 0 0 6201.00000 0 15 6340.00000 6201.00000 2.19% - 0s\n",
+"H 0 0 6219.0000000 6201.00000 0.29% - 0s\n",
+"Enforcing 3 subtour elimination constraints\n",
+" 0 0 infeasible 0 6219.00000 6219.00000 0.00% - 0s\n",
 "\n",
 "Cutting planes:\n",
-" Gomory: 5\n",
-" MIR: 1\n",
-" Zero half: 4\n",
-" Lazy constraints: 4\n",
+" Lazy constraints: 2\n",
 "\n",
-"Explored 1 nodes (224 simplex iterations) in 0.10 seconds (0.03 work units)\n",
+"Explored 1 nodes (217 simplex iterations) in 0.12 seconds (0.05 work units)\n",
 "Thread count was 1 (of 20 available processors)\n",
 "\n",
-"Solution count 4: 6219 6368 27241 29695 \n",
+"Solution count 6: 6219 6340 6684 ... 29695\n",
 "\n",
 "Optimal solution found (tolerance 1.00e-04)\n",
 "Best objective 6.219000000000e+03, best bound 6.219000000000e+03, gap 0.0000%\n",
 "\n",
-"User-callback calls 170, time in user-callback 0.01 sec\n"
+"User-callback calls 216, time in user-callback 0.06 sec\n"
 ]
 }
 ],
@@ -45,16 +45,10 @@
 "- Python version, compatible with the Pyomo and Gurobipy modeling languages,\n",
 "- Julia version, compatible with the JuMP modeling language.\n",
 "\n",
-"In this tutorial, we will demonstrate how to use and install the Python/Gurobipy version of the package. The first step is to install Python 3.8+ in your computer. See the [official Python website for more instructions](https://www.python.org/downloads/). After Python is installed, we proceed to install MIPLearn using `pip`:\n",
+"In this tutorial, we will demonstrate how to use and install the Python/Gurobipy version of the package. The first step is to install Python 3.9+ in your computer. See the [official Python website for more instructions](https://www.python.org/downloads/). After Python is installed, we proceed to install MIPLearn using `pip`:\n",
 "\n",
 "```\n",
-"$ pip install MIPLearn==0.3\n",
-"```\n",
-"\n",
-"In addition to MIPLearn itself, we will also install Gurobi 10.0, a state-of-the-art commercial MILP solver. This step also install a demo license for Gurobi, which should able to solve the small optimization problems in this tutorial. A license is required for solving larger-scale problems.\n",
-"\n",
-"```\n",
-"$ pip install 'gurobipy>=10,<10.1'\n",
+"$ pip install MIPLearn~=0.4\n",
 "```"
 ]
 },
@@ -220,11 +214,16 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Restricted license - for non-production use only - expires 2024-10-28\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Set parameter Threads to value 1\n",
+"Read parameters from file gurobi.env\n",
+"Restricted license - for non-production use only - expires 2026-11-23\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
-"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
+"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
+"\n",
+"Non-default parameters:\n",
+"Threads 1\n",
 "\n",
 "Optimize a model with 7 rows, 6 columns and 15 nonzeros\n",
 "Model fingerprint: 0x58dfdd53\n",
@@ -234,28 +233,28 @@
 " Objective range [2e+00, 7e+02]\n",
 " Bounds range [1e+00, 1e+00]\n",
 " RHS range [1e+02, 1e+02]\n",
-"Presolve removed 2 rows and 1 columns\n",
+"Presolve removed 6 rows and 3 columns\n",
 "Presolve time: 0.00s\n",
-"Presolved: 5 rows, 5 columns, 13 nonzeros\n",
-"Variable types: 0 continuous, 5 integer (3 binary)\n",
-"Found heuristic solution: objective 1400.0000000\n",
+"Presolved: 1 rows, 3 columns, 3 nonzeros\n",
+"Variable types: 0 continuous, 3 integer (1 binary)\n",
+"Found heuristic solution: objective 1990.0000000\n",
 "\n",
-"Root relaxation: objective 1.035000e+03, 3 iterations, 0.00 seconds (0.00 work units)\n",
+"Root relaxation: objective 1.320000e+03, 0 iterations, 0.00 seconds (0.00 work units)\n",
 "\n",
 " Nodes | Current Node | Objective Bounds | Work\n",
 " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
 "\n",
-" 0 0 1035.00000 0 1 1400.00000 1035.00000 26.1% - 0s\n",
-" 0 0 1105.71429 0 1 1400.00000 1105.71429 21.0% - 0s\n",
 "* 0 0 0 1320.0000000 1320.00000 0.00% - 0s\n",
 "\n",
-"Explored 1 nodes (5 simplex iterations) in 0.01 seconds (0.00 work units)\n",
-"Thread count was 20 (of 20 available processors)\n",
+"Explored 1 nodes (0 simplex iterations) in 0.01 seconds (0.00 work units)\n",
+"Thread count was 1 (of 20 available processors)\n",
 "\n",
-"Solution count 2: 1320 1400 \n",
+"Solution count 2: 1320 1990 \n",
 "\n",
 "Optimal solution found (tolerance 1.00e-04)\n",
 "Best objective 1.320000000000e+03, best bound 1.320000000000e+03, gap 0.0000%\n",
+"\n",
+"User-callback calls 541, time in user-callback 0.00 sec\n",
 "obj = 1320.0\n",
 "x = [-0.0, 1.0, 1.0]\n",
 "y = [0.0, 60.0, 40.0]\n"
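The log above comes from the tutorial's small unit commitment instance (three generators, binary on/off decisions `x` and continuous outputs `y`). A hypothetical sketch of a model with that shape; the data and formulation are illustrative and may differ from the tutorial's actual `build_uc_model`:

```python
# Hypothetical three-generator unit commitment sketch; all numbers made up.
import gurobipy as gp
from gurobipy import GRB

m = gp.Model()
n = 3
fixed_cost = [700.0, 600.0, 500.0]  # cost of keeping each generator on
marginal_cost = [5.0, 4.0, 2.0]     # cost per unit of power produced
capacity = [60.0, 60.0, 40.0]       # maximum output of each generator
demand = 100.0

x = m.addVars(n, vtype=GRB.BINARY, name="x")  # on/off decision
y = m.addVars(n, name="y")                    # power output
m.addConstrs((y[i] <= capacity[i] * x[i] for i in range(n)), name="cap")
m.addConstr(y.sum() >= demand, name="demand")
m.setObjective(
    gp.quicksum(fixed_cost[i] * x[i] + marginal_cost[i] * y[i] for i in range(n)),
    GRB.MINIMIZE,
)
m.optimize()
```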
@@ -401,7 +400,7 @@
 "from miplearn.collectors.basic import BasicCollector\n",
 "\n",
 "bc = BasicCollector()\n",
-"bc.collect(train_data, build_uc_model, n_jobs=4)"
+"bc.collect(train_data, build_uc_model)"
 ]
 },
 {
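The hunk above drops the `n_jobs=4` argument from the notebook's `BasicCollector.collect` call. For reference, a sketch of the collection step with the parallel variant left in; whether `n_jobs` remains supported in current releases is an assumption based only on the older notebook line removed here:

```python
# Sketch of training-data collection. The n_jobs usage is taken from the
# older notebook line removed in this commit and may not match current APIs.
from miplearn.collectors.basic import BasicCollector

bc = BasicCollector()
bc.collect(train_data, build_uc_model)              # sequential, as in the diff
# bc.collect(train_data, build_uc_model, n_jobs=4)  # older parallel variant
```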
@@ -480,10 +479,13 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
-"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
+"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
+"\n",
+"Non-default parameters:\n",
+"Threads 1\n",
 "\n",
 "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
 "Model fingerprint: 0xa8b70287\n",
@@ -493,22 +495,27 @@
 " Bounds range [1e+00, 1e+00]\n",
 " RHS range [3e+08, 3e+08]\n",
 "Presolve removed 1000 rows and 500 columns\n",
-"Presolve time: 0.01s\n",
+"Presolve time: 0.00s\n",
 "Presolved: 1 rows, 500 columns, 500 nonzeros\n",
 "\n",
 "Iteration Objective Primal Inf. Dual Inf. Time\n",
 " 0 6.6166537e+09 5.648803e+04 0.000000e+00 0s\n",
 " 1 8.2906219e+09 0.000000e+00 0.000000e+00 0s\n",
 "\n",
-"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
+"Solved in 1 iterations and 0.02 seconds (0.00 work units)\n",
 "Optimal objective 8.290621916e+09\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"\n",
+"User-callback calls 59, time in user-callback 0.00 sec\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
-"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
+"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
+"\n",
+"Non-default parameters:\n",
+"Threads 1\n",
 "\n",
 "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
-"Model fingerprint: 0xcf27855a\n",
+"Model fingerprint: 0x892e56b2\n",
 "Variable types: 500 continuous, 500 integer (500 binary)\n",
 "Coefficient statistics:\n",
 " Matrix range [1e+00, 2e+06]\n",
@@ -516,15 +523,18 @@
 " Bounds range [1e+00, 1e+00]\n",
 " RHS range [3e+08, 3e+08]\n",
 "\n",
+"User MIP start produced solution with objective 8.29824e+09 (0.00s)\n",
+"User MIP start produced solution with objective 8.29398e+09 (0.00s)\n",
 "User MIP start produced solution with objective 8.29153e+09 (0.01s)\n",
 "User MIP start produced solution with objective 8.29153e+09 (0.01s)\n",
 "Loaded user MIP start with objective 8.29153e+09\n",
 "\n",
+"Presolve removed 500 rows and 0 columns\n",
 "Presolve time: 0.00s\n",
-"Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n",
+"Presolved: 501 rows, 1000 columns, 2000 nonzeros\n",
 "Variable types: 500 continuous, 500 integer (500 binary)\n",
 "\n",
-"Root relaxation: objective 8.290622e+09, 512 iterations, 0.00 seconds (0.00 work units)\n",
+"Root relaxation: objective 8.290622e+09, 501 iterations, 0.00 seconds (0.02 work units)\n",
 "\n",
 " Nodes | Current Node | Objective Bounds | Work\n",
 " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
@@ -532,25 +542,29 @@
 " 0 0 8.2906e+09 0 1 8.2915e+09 8.2906e+09 0.01% - 0s\n",
 " 0 0 8.2907e+09 0 3 8.2915e+09 8.2907e+09 0.01% - 0s\n",
 " 0 0 8.2907e+09 0 1 8.2915e+09 8.2907e+09 0.01% - 0s\n",
-" 0 0 8.2907e+09 0 2 8.2915e+09 8.2907e+09 0.01% - 0s\n",
+" 0 0 8.2907e+09 0 1 8.2915e+09 8.2907e+09 0.01% - 0s\n",
+" 0 0 - 0 8.2915e+09 8.2907e+09 0.01% - 0s\n",
 "\n",
 "Cutting planes:\n",
 " Gomory: 1\n",
-" Flow cover: 2\n",
+" RLT: 2\n",
 "\n",
-"Explored 1 nodes (565 simplex iterations) in 0.03 seconds (0.01 work units)\n",
-"Thread count was 20 (of 20 available processors)\n",
+"Explored 1 nodes (550 simplex iterations) in 0.04 seconds (0.04 work units)\n",
+"Thread count was 1 (of 20 available processors)\n",
 "\n",
-"Solution count 1: 8.29153e+09 \n",
+"Solution count 4: 8.29153e+09 8.29398e+09 8.29695e+09 8.29824e+09 \n",
 "\n",
 "Optimal solution found (tolerance 1.00e-04)\n",
-"Best objective 8.291528276179e+09, best bound 8.290733258025e+09, gap 0.0096%\n"
+"Best objective 8.291528276179e+09, best bound 8.290709658754e+09, gap 0.0099%\n",
+"\n",
+"User-callback calls 799, time in user-callback 0.00 sec\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"{'WS: Count': 1, 'WS: Number of variables set': 482.0}"
+"(<miplearn.solvers.gurobi.GurobiModel at 0x7f2bcd72cfd0>,\n",
+" {'WS: Count': 1, 'WS: Number of variables set': 477.0})"
 ]
 },
 "execution_count": 8,
@@ -589,10 +603,13 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
-"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
+"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
+"\n",
+"Non-default parameters:\n",
+"Threads 1\n",
 "\n",
 "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
 "Model fingerprint: 0xa8b70287\n",
@@ -611,10 +628,15 @@
 "\n",
 "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
 "Optimal objective 8.290621916e+09\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"\n",
+"User-callback calls 59, time in user-callback 0.00 sec\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
-"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
+"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
+"\n",
+"Non-default parameters:\n",
+"Threads 1\n",
 "\n",
 "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
 "Model fingerprint: 0x4cbbf7c7\n",
@@ -624,58 +646,59 @@
|
|||||||
" Objective range [1e+00, 6e+07]\n",
|
" Objective range [1e+00, 6e+07]\n",
|
||||||
" Bounds range [1e+00, 1e+00]\n",
|
" Bounds range [1e+00, 1e+00]\n",
|
||||||
" RHS range [3e+08, 3e+08]\n",
|
" RHS range [3e+08, 3e+08]\n",
|
||||||
|
"Presolve removed 500 rows and 0 columns\n",
|
||||||
"Presolve time: 0.00s\n",
|
"Presolve time: 0.00s\n",
|
||||||
"Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n",
|
"Presolved: 501 rows, 1000 columns, 2000 nonzeros\n",
|
||||||
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
||||||
"Found heuristic solution: objective 9.757128e+09\n",
|
"Found heuristic solution: objective 1.729688e+10\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Root relaxation: objective 8.290622e+09, 512 iterations, 0.00 seconds (0.00 work units)\n",
|
"Root relaxation: objective 8.290622e+09, 501 iterations, 0.00 seconds (0.02 work units)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" Nodes | Current Node | Objective Bounds | Work\n",
|
" Nodes | Current Node | Objective Bounds | Work\n",
|
||||||
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
||||||
"\n",
|
"\n",
|
||||||
" 0 0 8.2906e+09 0 1 9.7571e+09 8.2906e+09 15.0% - 0s\n",
|
" 0 0 8.2906e+09 0 1 1.7297e+10 8.2906e+09 52.1% - 0s\n",
|
||||||
"H 0 0 8.298273e+09 8.2906e+09 0.09% - 0s\n",
|
"H 0 0 8.298243e+09 8.2906e+09 0.09% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 4 8.2983e+09 8.2907e+09 0.09% - 0s\n",
|
" 0 0 8.2907e+09 0 3 8.2982e+09 8.2907e+09 0.09% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 1 8.2983e+09 8.2907e+09 0.09% - 0s\n",
|
|
||||||
" 0 0 8.2907e+09 0 4 8.2983e+09 8.2907e+09 0.09% - 0s\n",
|
|
||||||
"H 0 0 8.293980e+09 8.2907e+09 0.04% - 0s\n",
|
"H 0 0 8.293980e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 5 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
|
||||||
" 0 0 8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 2 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2908e+09 0 1 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 3 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 3 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 4 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
"H 0 0 8.291465e+09 8.2908e+09 0.01% - 0s\n",
|
" 0 0 8.2907e+09 0 3 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
|
" 0 0 8.2907e+09 0 3 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
|
" 0 0 8.2907e+09 0 4 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
|
"H 0 0 8.291961e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2907e+09 0 1 8.2920e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2907e+09 0 3 8.2920e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2907e+09 0 4 8.2920e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2907e+09 0 2 8.2920e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2908e+09 0 3 8.2920e+09 8.2908e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2908e+09 0 5 8.2920e+09 8.2908e+09 0.01% - 0s\n",
|
||||||
|
" 0 0 8.2908e+09 0 5 8.2920e+09 8.2908e+09 0.01% - 0s\n",
|
||||||
|
" 0 2 8.2908e+09 0 5 8.2920e+09 8.2908e+09 0.01% - 0s\n",
|
||||||
|
"H 9 9 8.291298e+09 8.2908e+09 0.01% 1.4 0s\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Cutting planes:\n",
|
"Cutting planes:\n",
|
||||||
" Gomory: 2\n",
|
" MIR: 2\n",
|
||||||
" MIR: 1\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Explored 1 nodes (1031 simplex iterations) in 0.15 seconds (0.03 work units)\n",
|
"Explored 10 nodes (759 simplex iterations) in 0.09 seconds (0.11 work units)\n",
|
||||||
"Thread count was 20 (of 20 available processors)\n",
|
"Thread count was 1 (of 20 available processors)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solution count 4: 8.29147e+09 8.29398e+09 8.29827e+09 9.75713e+09 \n",
|
"Solution count 6: 8.2913e+09 8.29196e+09 8.29398e+09 ... 1.72969e+10\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimal solution found (tolerance 1.00e-04)\n",
|
"Optimal solution found (tolerance 1.00e-04)\n",
|
||||||
"Best objective 8.291465302389e+09, best bound 8.290781665333e+09, gap 0.0082%\n"
|
"Best objective 8.291298126440e+09, best bound 8.290812450252e+09, gap 0.0059%\n",
|
||||||
|
"\n",
|
||||||
|
"User-callback calls 910, time in user-callback 0.00 sec\n"
|
||||||
]
|
]
|
||||||
},
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"{}"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 9,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
"solver_baseline = LearningSolver(components=[])\n",
|
"solver_baseline = LearningSolver(components=[])\n",
|
||||||
"solver_baseline.fit(train_data)\n",
|
"solver_baseline.fit(train_data)\n",
|
||||||
"solver_baseline.optimize(test_data[0], build_uc_model)"
|
"solver_baseline.optimize(test_data[0], build_uc_model);"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
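The trailing semicolon added to the `optimize` call above suppresses the cell's return-value echo, which is why the `execute_result` block disappears from this cell's outputs. As later cells in this diff show (`(<miplearn.solvers.pyomo.PyomoModel ...>, {})`), `optimize` now returns a `(model, stats)` pair instead of a bare stats dictionary. A minimal sketch of unpacking the new return value, reusing the tutorial's names:

```python
# Hedged sketch: solver_baseline, test_data and build_uc_model are defined in
# the surrounding tutorial cells; only the unpacking is new here.
model, stats = solver_baseline.optimize(test_data[0], build_uc_model)
print(stats)  # component statistics; an empty dict when components=[]
```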
 {
@@ -713,10 +736,13 @@
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x19042f12\n",
|
"Model fingerprint: 0x19042f12\n",
|
||||||
@@ -733,15 +759,20 @@
|
|||||||
" 0 6.5917580e+09 5.627453e+04 0.000000e+00 0s\n",
|
" 0 6.5917580e+09 5.627453e+04 0.000000e+00 0s\n",
|
||||||
" 1 8.2535968e+09 0.000000e+00 0.000000e+00 0s\n",
|
" 1 8.2535968e+09 0.000000e+00 0.000000e+00 0s\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
"Solved in 1 iterations and 0.00 seconds (0.00 work units)\n",
|
||||||
"Optimal objective 8.253596777e+09\n",
|
"Optimal objective 8.253596777e+09\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"\n",
|
||||||
|
"User-callback calls 59, time in user-callback 0.00 sec\n",
|
||||||
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0xf97cde91\n",
|
"Model fingerprint: 0x6926c32f\n",
|
||||||
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
||||||
"Coefficient statistics:\n",
|
"Coefficient statistics:\n",
|
||||||
" Matrix range [1e+00, 2e+06]\n",
|
" Matrix range [1e+00, 2e+06]\n",
|
||||||
@@ -749,49 +780,44 @@
|
|||||||
" Bounds range [1e+00, 1e+00]\n",
|
" Bounds range [1e+00, 1e+00]\n",
|
||||||
" RHS range [3e+08, 3e+08]\n",
|
" RHS range [3e+08, 3e+08]\n",
|
||||||
"\n",
|
"\n",
|
||||||
"User MIP start produced solution with objective 8.25814e+09 (0.00s)\n",
|
"User MIP start produced solution with objective 8.25989e+09 (0.01s)\n",
|
||||||
"User MIP start produced solution with objective 8.25512e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25699e+09 (0.05s)\n",
|
||||||
"User MIP start produced solution with objective 8.25483e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25678e+09 (0.05s)\n",
|
||||||
"User MIP start produced solution with objective 8.25483e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25668e+09 (0.05s)\n",
|
||||||
"User MIP start produced solution with objective 8.25483e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.2554e+09 (0.05s)\n",
|
||||||
"User MIP start produced solution with objective 8.25459e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25448e+09 (0.05s)\n",
|
||||||
"User MIP start produced solution with objective 8.25459e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25448e+09 (0.05s)\n",
|
||||||
"Loaded user MIP start with objective 8.25459e+09\n",
|
"Loaded user MIP start with objective 8.25448e+09\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"Presolve removed 500 rows and 0 columns\n",
|
||||||
"Presolve time: 0.00s\n",
|
"Presolve time: 0.00s\n",
|
||||||
"Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n",
|
"Presolved: 501 rows, 1000 columns, 2000 nonzeros\n",
|
||||||
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Root relaxation: objective 8.253597e+09, 512 iterations, 0.00 seconds (0.00 work units)\n",
|
"Root relaxation: objective 8.253597e+09, 501 iterations, 0.00 seconds (0.02 work units)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" Nodes | Current Node | Objective Bounds | Work\n",
|
" Nodes | Current Node | Objective Bounds | Work\n",
|
||||||
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
||||||
"\n",
|
"\n",
|
||||||
" 0 0 8.2536e+09 0 1 8.2546e+09 8.2536e+09 0.01% - 0s\n",
|
" 0 0 8.2536e+09 0 1 8.2545e+09 8.2536e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2537e+09 0 3 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
"H 0 0 8.254435e+09 8.2536e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2537e+09 0 1 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
" 0 0 - 0 8.2544e+09 8.2537e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2538e+09 0 4 8.2546e+09 8.2538e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2538e+09 0 5 8.2546e+09 8.2538e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2538e+09 0 6 8.2546e+09 8.2538e+09 0.01% - 0s\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Cutting planes:\n",
|
"Cutting planes:\n",
|
||||||
" Cover: 1\n",
|
" RLT: 2\n",
|
||||||
" MIR: 2\n",
|
|
||||||
" StrongCG: 1\n",
|
|
||||||
" Flow cover: 1\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Explored 1 nodes (575 simplex iterations) in 0.05 seconds (0.01 work units)\n",
|
"Explored 1 nodes (503 simplex iterations) in 0.07 seconds (0.03 work units)\n",
|
||||||
"Thread count was 20 (of 20 available processors)\n",
|
"Thread count was 1 (of 20 available processors)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solution count 4: 8.25459e+09 8.25483e+09 8.25512e+09 8.25814e+09 \n",
|
"Solution count 7: 8.25443e+09 8.25448e+09 8.2554e+09 ... 8.25989e+09\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimal solution found (tolerance 1.00e-04)\n",
|
"Optimal solution found (tolerance 1.00e-04)\n",
|
||||||
"Best objective 8.254590409970e+09, best bound 8.253768093811e+09, gap 0.0100%\n",
|
"Best objective 8.254434593504e+09, best bound 8.253676932849e+09, gap 0.0092%\n",
|
||||||
"obj = 8254590409.969726\n",
|
"\n",
|
||||||
|
"User-callback calls 787, time in user-callback 0.00 sec\n",
|
||||||
|
"obj = 8254434593.503945\n",
|
||||||
"x = [1.0, 1.0, 0.0]\n",
|
"x = [1.0, 1.0, 0.0]\n",
|
||||||
"y = [935662.0949262811, 1604270.0218116897, 0.0]\n"
|
"y = [935662.09492646, 1604270.0218116897, 0.0]\n"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -41,7 +41,7 @@
|
|||||||
"In this tutorial, we will demonstrate how to use and install the Python/Pyomo version of the package. The first step is to install Julia in your machine. See the [official Julia website for more instructions](https://julialang.org/downloads/). After Julia is installed, launch the Julia REPL, type `]` to enter package mode, then install MIPLearn:\n",
|
"In this tutorial, we will demonstrate how to use and install the Python/Pyomo version of the package. The first step is to install Julia in your machine. See the [official Julia website for more instructions](https://julialang.org/downloads/). After Julia is installed, launch the Julia REPL, type `]` to enter package mode, then install MIPLearn:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"```\n",
|
"```\n",
|
||||||
"pkg> add MIPLearn@0.3\n",
|
"pkg> add MIPLearn@0.4\n",
|
||||||
"```"
|
"```"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -45,16 +45,10 @@
|
|||||||
"- Python version, compatible with the Pyomo and Gurobipy modeling languages,\n",
|
"- Python version, compatible with the Pyomo and Gurobipy modeling languages,\n",
|
||||||
"- Julia version, compatible with the JuMP modeling language.\n",
|
"- Julia version, compatible with the JuMP modeling language.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In this tutorial, we will demonstrate how to use and install the Python/Pyomo version of the package. The first step is to install Python 3.8+ in your computer. See the [official Python website for more instructions](https://www.python.org/downloads/). After Python is installed, we proceed to install MIPLearn using `pip`:\n",
|
"In this tutorial, we will demonstrate how to use and install the Python/Pyomo version of the package. The first step is to install Python 3.9+ in your computer. See the [official Python website for more instructions](https://www.python.org/downloads/). After Python is installed, we proceed to install MIPLearn using `pip`:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"```\n",
|
"```\n",
|
||||||
"$ pip install MIPLearn==0.3\n",
|
"$ pip install MIPLearn~=0.4\n",
|
||||||
"```\n",
|
|
||||||
"\n",
|
|
||||||
"In addition to MIPLearn itself, we will also install Gurobi 10.0, a state-of-the-art commercial MILP solver. This step also install a demo license for Gurobi, which should able to solve the small optimization problems in this tutorial. A license is required for solving larger-scale problems.\n",
|
|
||||||
"\n",
|
|
||||||
"```\n",
|
|
||||||
"$ pip install 'gurobipy>=10,<10.1'\n",
|
|
||||||
"```"
|
"```"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
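The removed cell dropped the Gurobi 10 installation step. Since the outputs below were produced with Gurobi 12.0.2, a matching install command would be the following; the exact version pin is an assumption, not part of this diff:

```
$ pip install 'gurobipy>=12,<12.1'
```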
@@ -226,12 +220,19 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Restricted license - for non-production use only - expires 2024-10-28\n",
+"Set parameter Threads to value 1\n",
+"Read parameters from file gurobi.env\n",
+"Restricted license - for non-production use only - expires 2026-11-23\n",
+"Set parameter OutputFlag to value 1\n",
 "Set parameter QCPDual to value 1\n",
-"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
+"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
 "\n",
 "CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
-"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
+"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
+"\n",
+"Non-default parameters:\n",
+"QCPDual  1\n",
+"Threads  1\n",
 "\n",
 "Optimize a model with 7 rows, 6 columns and 15 nonzeros\n",
 "Model fingerprint: 0x15c7a953\n",
@@ -241,25 +242,23 @@
" Objective range [2e+00, 7e+02]\n",
|
" Objective range [2e+00, 7e+02]\n",
|
||||||
" Bounds range [1e+00, 1e+00]\n",
|
" Bounds range [1e+00, 1e+00]\n",
|
||||||
" RHS range [1e+02, 1e+02]\n",
|
" RHS range [1e+02, 1e+02]\n",
|
||||||
"Presolve removed 2 rows and 1 columns\n",
|
"Presolve removed 6 rows and 3 columns\n",
|
||||||
"Presolve time: 0.00s\n",
|
"Presolve time: 0.00s\n",
|
||||||
"Presolved: 5 rows, 5 columns, 13 nonzeros\n",
|
"Presolved: 1 rows, 3 columns, 3 nonzeros\n",
|
||||||
"Variable types: 0 continuous, 5 integer (3 binary)\n",
|
"Variable types: 0 continuous, 3 integer (1 binary)\n",
|
||||||
"Found heuristic solution: objective 1400.0000000\n",
|
"Found heuristic solution: objective 1990.0000000\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Root relaxation: objective 1.035000e+03, 3 iterations, 0.00 seconds (0.00 work units)\n",
|
"Root relaxation: objective 1.320000e+03, 0 iterations, 0.00 seconds (0.00 work units)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" Nodes | Current Node | Objective Bounds | Work\n",
|
" Nodes | Current Node | Objective Bounds | Work\n",
|
||||||
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
||||||
"\n",
|
"\n",
|
||||||
" 0 0 1035.00000 0 1 1400.00000 1035.00000 26.1% - 0s\n",
|
|
||||||
" 0 0 1105.71429 0 1 1400.00000 1105.71429 21.0% - 0s\n",
|
|
||||||
"* 0 0 0 1320.0000000 1320.00000 0.00% - 0s\n",
|
"* 0 0 0 1320.0000000 1320.00000 0.00% - 0s\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Explored 1 nodes (5 simplex iterations) in 0.01 seconds (0.00 work units)\n",
|
"Explored 1 nodes (0 simplex iterations) in 0.01 seconds (0.00 work units)\n",
|
||||||
"Thread count was 20 (of 20 available processors)\n",
|
"Thread count was 1 (of 20 available processors)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solution count 2: 1320 1400 \n",
|
"Solution count 2: 1320 1990 \n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimal solution found (tolerance 1.00e-04)\n",
|
"Optimal solution found (tolerance 1.00e-04)\n",
|
||||||
"Best objective 1.320000000000e+03, best bound 1.320000000000e+03, gap 0.0000%\n",
|
"Best objective 1.320000000000e+03, best bound 1.320000000000e+03, gap 0.0000%\n",
|
||||||
@@ -489,11 +488,16 @@
|
|||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
|
"Set parameter OutputFlag to value 1\n",
|
||||||
"Set parameter QCPDual to value 1\n",
|
"Set parameter QCPDual to value 1\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"QCPDual 1\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x5e67c6ee\n",
|
"Model fingerprint: 0x5e67c6ee\n",
|
||||||
@@ -512,14 +516,19 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
||||||
"Optimal objective 8.290621916e+09\n",
|
"Optimal objective 8.290621916e+09\n",
|
||||||
|
"Set parameter OutputFlag to value 1\n",
|
||||||
"Set parameter QCPDual to value 1\n",
|
"Set parameter QCPDual to value 1\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"QCPDual 1\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x4a7cfe2b\n",
|
"Model fingerprint: 0xff6a55c5\n",
|
||||||
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
||||||
"Coefficient statistics:\n",
|
"Coefficient statistics:\n",
|
||||||
" Matrix range [1e+00, 2e+06]\n",
|
" Matrix range [1e+00, 2e+06]\n",
|
||||||
@@ -527,8 +536,8 @@
|
|||||||
" Bounds range [1e+00, 1e+00]\n",
|
" Bounds range [1e+00, 1e+00]\n",
|
||||||
" RHS range [3e+08, 3e+08]\n",
|
" RHS range [3e+08, 3e+08]\n",
|
||||||
"\n",
|
"\n",
|
||||||
"User MIP start produced solution with objective 8.29153e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.29153e+09 (0.00s)\n",
|
||||||
"User MIP start produced solution with objective 8.29153e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.29153e+09 (0.00s)\n",
|
||||||
"Loaded user MIP start with objective 8.29153e+09\n",
|
"Loaded user MIP start with objective 8.29153e+09\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Presolve time: 0.00s\n",
|
"Presolve time: 0.00s\n",
|
||||||
@@ -543,19 +552,20 @@
|
|||||||
" 0 0 8.2906e+09 0 1 8.2915e+09 8.2906e+09 0.01% - 0s\n",
|
" 0 0 8.2906e+09 0 1 8.2915e+09 8.2906e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 3 8.2915e+09 8.2907e+09 0.01% - 0s\n",
|
" 0 0 8.2907e+09 0 3 8.2915e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 1 8.2915e+09 8.2907e+09 0.01% - 0s\n",
|
" 0 0 8.2907e+09 0 1 8.2915e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 2 8.2915e+09 8.2907e+09 0.01% - 0s\n",
|
" 0 0 - 0 8.2915e+09 8.2907e+09 0.01% - 0s\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Cutting planes:\n",
|
"Cutting planes:\n",
|
||||||
" Gomory: 1\n",
|
" Gomory: 1\n",
|
||||||
|
" Cover: 1\n",
|
||||||
" Flow cover: 2\n",
|
" Flow cover: 2\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Explored 1 nodes (565 simplex iterations) in 0.04 seconds (0.01 work units)\n",
|
"Explored 1 nodes (564 simplex iterations) in 0.03 seconds (0.01 work units)\n",
|
||||||
"Thread count was 20 (of 20 available processors)\n",
|
"Thread count was 1 (of 20 available processors)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solution count 1: 8.29153e+09 \n",
|
"Solution count 1: 8.29153e+09 \n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimal solution found (tolerance 1.00e-04)\n",
|
"Optimal solution found (tolerance 1.00e-04)\n",
|
||||||
"Best objective 8.291528276179e+09, best bound 8.290733258025e+09, gap 0.0096%\n",
|
"Best objective 8.291528276179e+09, best bound 8.290729173948e+09, gap 0.0096%\n",
|
||||||
"WARNING: Cannot get reduced costs for MIP.\n",
|
"WARNING: Cannot get reduced costs for MIP.\n",
|
||||||
"WARNING: Cannot get duals for MIP.\n"
|
"WARNING: Cannot get duals for MIP.\n"
|
||||||
]
|
]
|
||||||
@@ -563,7 +573,7 @@
|
|||||||
 {
 "data": {
 "text/plain": [
-"{}"
+"(<miplearn.solvers.pyomo.PyomoModel at 0x7fdb38952450>, {})"
 ]
 },
 "execution_count": 8,
@@ -602,11 +612,16 @@
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
|
"Set parameter OutputFlag to value 1\n",
|
||||||
"Set parameter QCPDual to value 1\n",
|
"Set parameter QCPDual to value 1\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"QCPDual 1\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x5e67c6ee\n",
|
"Model fingerprint: 0x5e67c6ee\n",
|
||||||
@@ -625,11 +640,16 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
||||||
"Optimal objective 8.290621916e+09\n",
|
"Optimal objective 8.290621916e+09\n",
|
||||||
|
"Set parameter OutputFlag to value 1\n",
|
||||||
"Set parameter QCPDual to value 1\n",
|
"Set parameter QCPDual to value 1\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"QCPDual 1\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x8a0f9587\n",
|
"Model fingerprint: 0x8a0f9587\n",
|
||||||
@@ -658,22 +678,25 @@
|
|||||||
" 0 0 8.2907e+09 0 5 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 5 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2907e+09 0 2 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
" 0 0 8.2907e+09 0 2 8.2940e+09 8.2907e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2908e+09 0 1 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
|
||||||
" 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
" 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
||||||
" 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
" 0 0 8.2908e+09 0 3 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
||||||
"H 0 0 8.291465e+09 8.2908e+09 0.01% - 0s\n",
|
" 0 0 8.2908e+09 0 3 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
||||||
|
" 0 2 8.2908e+09 0 3 8.2940e+09 8.2908e+09 0.04% - 0s\n",
|
||||||
|
"H 9 9 8.292471e+09 8.2908e+09 0.02% 1.3 0s\n",
|
||||||
|
"* 90 41 44 8.291525e+09 8.2908e+09 0.01% 1.5 0s\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Cutting planes:\n",
|
"Cutting planes:\n",
|
||||||
" Gomory: 2\n",
|
" Gomory: 1\n",
|
||||||
" MIR: 1\n",
|
" Cover: 1\n",
|
||||||
|
" MIR: 2\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Explored 1 nodes (1025 simplex iterations) in 0.12 seconds (0.03 work units)\n",
|
"Explored 91 nodes (1166 simplex iterations) in 0.06 seconds (0.05 work units)\n",
|
||||||
"Thread count was 20 (of 20 available processors)\n",
|
"Thread count was 1 (of 20 available processors)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solution count 4: 8.29147e+09 8.29398e+09 8.29827e+09 9.75713e+09 \n",
|
"Solution count 7: 8.29152e+09 8.29247e+09 8.29398e+09 ... 1.0319e+10\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimal solution found (tolerance 1.00e-04)\n",
|
"Optimal solution found (tolerance 1.00e-04)\n",
|
||||||
"Best objective 8.291465302389e+09, best bound 8.290781665333e+09, gap 0.0082%\n",
|
"Best objective 8.291524908632e+09, best bound 8.290823611882e+09, gap 0.0085%\n",
|
||||||
"WARNING: Cannot get reduced costs for MIP.\n",
|
"WARNING: Cannot get reduced costs for MIP.\n",
|
||||||
"WARNING: Cannot get duals for MIP.\n"
|
"WARNING: Cannot get duals for MIP.\n"
|
||||||
]
|
]
|
||||||
@@ -681,7 +704,7 @@
|
|||||||
 {
 "data": {
 "text/plain": [
-"{}"
+"(<miplearn.solvers.pyomo.PyomoModel at 0x7fdb2f563f50>, {})"
 ]
 },
 "execution_count": 9,
@@ -730,11 +753,16 @@
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
|
"Set parameter OutputFlag to value 1\n",
|
||||||
"Set parameter QCPDual to value 1\n",
|
"Set parameter QCPDual to value 1\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"QCPDual 1\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x2dfe4e1c\n",
|
"Model fingerprint: 0x2dfe4e1c\n",
|
||||||
@@ -753,14 +781,19 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
"Solved in 1 iterations and 0.01 seconds (0.00 work units)\n",
|
||||||
"Optimal objective 8.253596777e+09\n",
|
"Optimal objective 8.253596777e+09\n",
|
||||||
|
"Set parameter OutputFlag to value 1\n",
|
||||||
"Set parameter QCPDual to value 1\n",
|
"Set parameter QCPDual to value 1\n",
|
||||||
"Gurobi Optimizer version 10.0.3 build v10.0.3rc0 (linux64)\n",
|
"Gurobi Optimizer version 12.0.2 build v12.0.2rc0 (linux64 - \"Ubuntu 22.04.4 LTS\")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
"CPU model: 13th Gen Intel(R) Core(TM) i7-13800H, instruction set [SSE2|AVX|AVX2]\n",
|
||||||
"Thread count: 10 physical cores, 20 logical processors, using up to 20 threads\n",
|
"Thread count: 10 physical cores, 20 logical processors, using up to 1 threads\n",
|
||||||
|
"\n",
|
||||||
|
"Non-default parameters:\n",
|
||||||
|
"QCPDual 1\n",
|
||||||
|
"Threads 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
"Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n",
|
||||||
"Model fingerprint: 0x0f0924a1\n",
|
"Model fingerprint: 0xd941f1ed\n",
|
||||||
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
"Variable types: 500 continuous, 500 integer (500 binary)\n",
|
||||||
"Coefficient statistics:\n",
|
"Coefficient statistics:\n",
|
||||||
" Matrix range [1e+00, 2e+06]\n",
|
" Matrix range [1e+00, 2e+06]\n",
|
||||||
@@ -768,14 +801,11 @@
|
|||||||
" Bounds range [1e+00, 1e+00]\n",
|
" Bounds range [1e+00, 1e+00]\n",
|
||||||
" RHS range [3e+08, 3e+08]\n",
|
" RHS range [3e+08, 3e+08]\n",
|
||||||
"\n",
|
"\n",
|
||||||
"User MIP start produced solution with objective 8.25814e+09 (0.00s)\n",
|
"User MIP start produced solution with objective 8.25814e+09 (0.01s)\n",
|
||||||
"User MIP start produced solution with objective 8.25512e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25512e+09 (0.01s)\n",
|
||||||
"User MIP start produced solution with objective 8.25483e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25448e+09 (0.01s)\n",
|
||||||
"User MIP start produced solution with objective 8.25483e+09 (0.01s)\n",
|
"User MIP start produced solution with objective 8.25448e+09 (0.02s)\n",
|
||||||
"User MIP start produced solution with objective 8.25483e+09 (0.01s)\n",
|
"Loaded user MIP start with objective 8.25448e+09\n",
|
||||||
"User MIP start produced solution with objective 8.25459e+09 (0.01s)\n",
|
|
||||||
"User MIP start produced solution with objective 8.25459e+09 (0.01s)\n",
|
|
||||||
"Loaded user MIP start with objective 8.25459e+09\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Presolve time: 0.00s\n",
|
"Presolve time: 0.00s\n",
|
||||||
"Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n",
|
"Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n",
|
||||||
@@ -786,31 +816,23 @@
|
|||||||
" Nodes | Current Node | Objective Bounds | Work\n",
|
" Nodes | Current Node | Objective Bounds | Work\n",
|
||||||
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
" Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n",
|
||||||
"\n",
|
"\n",
|
||||||
" 0 0 8.2536e+09 0 1 8.2546e+09 8.2536e+09 0.01% - 0s\n",
|
" 0 0 8.2536e+09 0 1 8.2545e+09 8.2536e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2537e+09 0 3 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
" 0 0 - 0 8.2545e+09 8.2537e+09 0.01% - 0s\n",
|
||||||
" 0 0 8.2537e+09 0 1 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2538e+09 0 4 8.2546e+09 8.2538e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2538e+09 0 5 8.2546e+09 8.2538e+09 0.01% - 0s\n",
|
|
||||||
" 0 0 8.2538e+09 0 6 8.2546e+09 8.2538e+09 0.01% - 0s\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Cutting planes:\n",
|
"Cutting planes:\n",
|
||||||
" Cover: 1\n",
|
" Cover: 1\n",
|
||||||
" MIR: 2\n",
|
" Flow cover: 2\n",
|
||||||
" StrongCG: 1\n",
|
|
||||||
" Flow cover: 1\n",
|
|
||||||
"\n",
|
"\n",
|
||||||
"Explored 1 nodes (575 simplex iterations) in 0.09 seconds (0.01 work units)\n",
|
"Explored 1 nodes (514 simplex iterations) in 0.03 seconds (0.01 work units)\n",
|
||||||
"Thread count was 20 (of 20 available processors)\n",
|
"Thread count was 1 (of 20 available processors)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Solution count 4: 8.25459e+09 8.25483e+09 8.25512e+09 8.25814e+09 \n",
|
"Solution count 3: 8.25448e+09 8.25512e+09 8.25814e+09 \n",
|
||||||
"\n",
|
"\n",
|
||||||
"Optimal solution found (tolerance 1.00e-04)\n",
|
"Optimal solution found (tolerance 1.00e-04)\n",
|
||||||
"Best objective 8.254590409970e+09, best bound 8.253768093811e+09, gap 0.0100%\n",
|
"Best objective 8.254479145594e+09, best bound 8.253676932849e+09, gap 0.0097%\n",
|
||||||
"WARNING: Cannot get reduced costs for MIP.\n",
|
"WARNING: Cannot get reduced costs for MIP.\n",
|
||||||
"WARNING: Cannot get duals for MIP.\n",
|
"WARNING: Cannot get duals for MIP.\n",
|
||||||
"obj = 8254590409.96973\n",
|
"obj = 8254479145.594172\n",
|
||||||
" x = [1.0, 1.0, 0.0, 1.0, 1.0]\n",
|
" x = [1.0, 1.0, 0.0, 1.0, 1.0]\n",
|
||||||
" y = [935662.0949262811, 1604270.0218116897, 0.0, 1369560.835229226, 602828.5321028307]\n"
|
" y = [935662.0949262811, 1604270.0218116897, 0.0, 1369560.835229226, 602828.5321028307]\n"
|
||||||
]
|
]
|
||||||
|
|||||||
BIN
miplearn/.io.py.swp
Normal file
BIN
miplearn/.io.py.swp
Normal file
Binary file not shown.
@@ -9,6 +9,7 @@ import sys
 from io import StringIO
 from os.path import exists
 from typing import Callable, List, Any
+import traceback

 from ..h5 import H5File
 from ..io import _RedirectOutput, gzip, _to_h5_filename
@@ -16,9 +17,15 @@ from ..parallel import p_umap


 class BasicCollector:
-    def __init__(self, skip_lp: bool = False, write_mps: bool = True) -> None:
+    def __init__(
+        self,
+        skip_lp: bool = False,
+        write_mps: bool = True,
+        write_log: bool = True,
+    ) -> None:
         self.skip_lp = skip_lp
         self.write_mps = write_mps
+        self.write_log = write_log

     def collect(
         self,
@@ -29,62 +36,68 @@ class BasicCollector:
         verbose: bool = False,
     ) -> None:
         def _collect(data_filename: str) -> None:
-            h5_filename = _to_h5_filename(data_filename)
-            mps_filename = h5_filename.replace(".h5", ".mps")
+            try:
+                h5_filename = _to_h5_filename(data_filename)
+                mps_filename = h5_filename.replace(".h5", ".mps")
+                log_filename = h5_filename.replace(".h5", ".h5.log")

                 if exists(h5_filename):
                     # Try to read optimal solution
                     mip_var_values = None
                     try:
                         with H5File(h5_filename, "r") as h5:
                             mip_var_values = h5.get_array("mip_var_values")
                     except:
                         pass

                     if mip_var_values is None:
                         print(f"Removing empty/corrupted h5 file: {h5_filename}")
                         os.remove(h5_filename)
                     else:
                         return

                 with H5File(h5_filename, "w") as h5:
+                    h5.put_scalar("data_filename", data_filename)
                     streams: List[Any] = [StringIO()]
                     if verbose:
                         streams += [sys.stdout]
                     with _RedirectOutput(streams):
                         # Load and extract static features
                         model = build_model(data_filename)
                         model.extract_after_load(h5)

                         if not self.skip_lp:
                             # Solve LP relaxation
                             relaxed = model.relax()
                             relaxed.optimize()
                             relaxed.extract_after_lp(h5)

                         # Solve MIP
                         model.optimize()
                         model.extract_after_mip(h5)

                         if self.write_mps:
                             # Add lazy constraints to model
                             model._lazy_enforce_collected()

                             # Save MPS file
                             model.write(mps_filename)
                             gzip(mps_filename)

-                    h5.put_scalar("mip_log", streams[0].getvalue())
+                    log = streams[0].getvalue()
+                    h5.put_scalar("mip_log", log)
+                    if self.write_log:
+                        with open(log_filename, "w") as log_file:
+                            log_file.write(log)
+            except:
+                print(f"Error processing: {data_filename}")
+                traceback.print_exc()

-        if n_jobs > 1:
-            p_umap(
-                _collect,
-                filenames,
-                num_cpus=n_jobs,
-                desc="collect",
-                smoothing=0,
-                disable=not progress,
-            )
-        else:
-            for filename in filenames:
-                _collect(filename)
+        p_umap(
+            _collect,
+            filenames,
+            num_cpus=n_jobs,
+            desc="collect",
+            smoothing=0,
+            disable=not progress,
+        )
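For context, a minimal sketch of how this collector is typically driven; the import paths, file name, and choice of model builder are assumptions for illustration:

```python
# Hedged sketch: collect training data (HDF5 plus optional MPS and log files)
# for a list of pickled instance files.
from miplearn.collectors.basic import BasicCollector  # import path assumed
from miplearn.problems.binpack import build_binpack_model_gurobipy  # path assumed

collector = BasicCollector(skip_lp=False, write_mps=True, write_log=True)
collector.collect(
    ["data/binpack-0000.pkl.gz"],  # placeholder instance file
    build_binpack_model_gurobipy,  # builds a solver model from each data file
    n_jobs=1,                      # larger values fan out via p_umap
)
```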
@@ -1,29 +1,53 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from typing import Tuple
+from typing import Tuple, List

 import numpy as np

 from miplearn.h5 import H5File


-def _extract_bin_var_names_values(
+def _extract_var_names_values(
     h5: H5File,
+    selected_var_types: List[bytes],
 ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
-    bin_var_names, bin_var_indices = _extract_bin_var_names(h5)
+    bin_var_names, bin_var_indices = _extract_var_names(h5, selected_var_types)
     var_values = h5.get_array("mip_var_values")
     assert var_values is not None
     bin_var_values = var_values[bin_var_indices].astype(int)
     return bin_var_names, bin_var_values, bin_var_indices


-def _extract_bin_var_names(h5: H5File) -> Tuple[np.ndarray, np.ndarray]:
+def _extract_var_names(
+    h5: H5File,
+    selected_var_types: List[bytes],
+) -> Tuple[np.ndarray, np.ndarray]:
     var_types = h5.get_array("static_var_types")
     var_names = h5.get_array("static_var_names")
     assert var_types is not None
     assert var_names is not None
-    bin_var_indices = np.where(var_types == b"B")[0]
+    bin_var_indices = np.where(np.isin(var_types, selected_var_types))[0]
     bin_var_names = var_names[bin_var_indices]
     assert len(bin_var_names.shape) == 1
     return bin_var_names, bin_var_indices


+def _extract_bin_var_names_values(
+    h5: H5File,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    return _extract_var_names_values(h5, [b"B"])
+
+
+def _extract_bin_var_names(h5: H5File) -> Tuple[np.ndarray, np.ndarray]:
+    return _extract_var_names(h5, [b"B"])
+
+
+def _extract_int_var_names_values(
+    h5: H5File,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    return _extract_var_names_values(h5, [b"B", b"I"])
+
+
+def _extract_int_var_names(h5: H5File) -> Tuple[np.ndarray, np.ndarray]:
+    return _extract_var_names(h5, [b"B", b"I"])
@@ -5,7 +5,7 @@
 import logging
 from typing import Any, Dict, List

-from . import _extract_bin_var_names_values
+from . import _extract_int_var_names_values
 from .actions import PrimalComponentAction
 from ...solvers.abstract import AbstractModel
 from ...h5 import H5File
@@ -28,5 +28,5 @@ class ExpertPrimalComponent:
         self, test_h5: str, model: AbstractModel, stats: Dict[str, Any]
     ) -> None:
         with H5File(test_h5, "r") as h5:
-            names, values, _ = _extract_bin_var_names_values(h5)
+            names, values, _ = _extract_int_var_names_values(h5)
             self.action.perform(model, names, values.reshape(1, -1), stats)
@@ -28,4 +28,5 @@ class ExpertBranchPriorityComponent:
         for var_idx, var_name in enumerate(var_names):
             if np.isfinite(var_priority[var_idx]):
                 var = model.getVarByName(var_name.decode())
-                var.branchPriority = int(log(1 + var_priority[var_idx]))
+                assert var is not None, f"unknown var: {var_name}"
+                var.BranchPriority = int(log(1 + var_priority[var_idx]))
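The added assertion matters because `Model.getVarByName` returns `None` when no variable has the given name, which previously surfaced as a confusing error one line later; the assert fails early and reports the offending name instead. A sketch with a hypothetical variable name:

```python
# Hedged sketch: fail fast on unknown variable names before setting priorities.
var = model.getVarByName("x[0]")  # hypothetical name; returns None if unknown
assert var is not None, "unknown var: x[0]"
var.BranchPriority = 5
```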
@@ -68,7 +68,7 @@ class H5File:
             return
         self._assert_is_array(value)
         if value.dtype.kind == "f":
-            value = value.astype("float32")
+            value = value.astype("float64")
         if key in self.file:
             del self.file[key]
         return self.file.create_dataset(key, data=value, compression="gzip")
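Storing float arrays as float64 matters at the magnitudes seen in the logs above: float32 carries only about seven significant decimal digits, so objective values near 8.25e9 lose everything below the hundreds place. A standalone NumPy illustration (not MIPLearn API):

```python
import numpy as np

obj = 8254434593.503945
print(np.float64(obj))  # full value preserved
print(np.float32(obj))  # roughly 8.2544346e+09; low-order digits are lost
```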
@@ -87,7 +87,10 @@ def read_pkl_gz(filename: str) -> Any:
 def _to_h5_filename(data_filename: str) -> str:
     output = f"{data_filename}.h5"
     output = output.replace(".gz.h5", ".h5")
-    output = output.replace(".json.h5", ".h5")
-    output = output.replace(".pkl.h5", ".h5")
+    output = output.replace(".csv.h5", ".h5")
     output = output.replace(".jld2.h5", ".h5")
+    output = output.replace(".json.h5", ".h5")
+    output = output.replace(".lp.h5", ".h5")
+    output = output.replace(".mps.h5", ".h5")
+    output = output.replace(".pkl.h5", ".h5")
     return output
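The net effect is that all recognized data-file extensions map to the same `.h5` name, with `.csv`, `.lp`, and `.mps` newly handled. For example:

```python
# Hedged sketch of the mapping implemented above:
from miplearn.io import _to_h5_filename

assert _to_h5_filename("instance-0001.pkl.gz") == "instance-0001.h5"
assert _to_h5_filename("instance-0001.mps") == "instance-0001.h5"
assert _to_h5_filename("instance-0001.csv") == "instance-0001.h5"
```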
@@ -34,19 +34,10 @@ class BinPackData:
 class BinPackGenerator:
     """Random instance generator for the bin packing problem.

-    If `fix_items=False`, the class samples the user-provided probability distributions
+    Generates instances by sampling the user-provided probability distributions
     n, sizes and capacity to decide, respectively, the number of items, the sizes of
     the items and capacity of the bin. All values are sampled independently.
-
-    If `fix_items=True`, the class creates a reference instance, using the method
-    previously described, then generates additional instances by perturbing its item
-    sizes and bin capacity. More specifically, the sizes of the items are set to `s_i
-    * gamma_i` where `s_i` is the size of the i-th item in the reference instance and
-    `gamma_i` is sampled from `sizes_jitter`. Similarly, the bin capacity is set to `B *
-    beta`, where `B` is the reference bin capacity and `beta` is sampled from
-    `capacity_jitter`. The number of items remains the same across all generated
-    instances.

     Args
     ----
     n
@@ -55,13 +46,6 @@ class BinPackGenerator:
         Probability distribution for the item sizes.
     capacity
         Probability distribution for the bin capacity.
-    sizes_jitter
-        Probability distribution for the item size randomization.
-    capacity_jitter
-        Probability distribution for the bin capacity.
-    fix_items
-        If `True`, generates a reference instance, then applies some perturbation to it.
-        If `False`, generates completely different instances.
     """

     def __init__(
@@ -69,17 +53,10 @@ class BinPackGenerator:
         n: rv_frozen,
         sizes: rv_frozen,
         capacity: rv_frozen,
-        sizes_jitter: rv_frozen,
-        capacity_jitter: rv_frozen,
-        fix_items: bool,
     ) -> None:
         self.n = n
         self.sizes = sizes
         self.capacity = capacity
-        self.sizes_jitter = sizes_jitter
-        self.capacity_jitter = capacity_jitter
-        self.fix_items = fix_items
-        self.ref_data: Optional[BinPackData] = None

     def generate(self, n_samples: int) -> List[BinPackData]:
         """Generates random instances.
@@ -91,22 +68,62 @@ class BinPackGenerator:
         """

         def _sample() -> BinPackData:
-            if self.ref_data is None:
-                n = self.n.rvs()
-                sizes = self.sizes.rvs(n)
-                capacity = self.capacity.rvs()
-                if self.fix_items:
-                    self.ref_data = BinPackData(sizes, capacity)
-            else:
-                n = self.ref_data.sizes.shape[0]
-                sizes = self.ref_data.sizes
-                capacity = self.ref_data.capacity
-
-            sizes = sizes * self.sizes_jitter.rvs(n)
-            capacity = capacity * self.capacity_jitter.rvs()
+            n = self.n.rvs()
+            sizes = self.sizes.rvs(n)
+            capacity = self.capacity.rvs()
             return BinPackData(sizes.round(2), capacity.round(2))

-        return [_sample() for n in range(n_samples)]
+        return [_sample() for _ in range(n_samples)]
+
+
+class BinPackPerturber:
+    """Perturbation generator for existing bin packing instances.
+
+    Takes an existing BinPackData instance and generates new instances by perturbing
+    its item sizes and bin capacity. The sizes of the items are set to `s_i * gamma_i`
+    where `s_i` is the size of the i-th item in the reference instance and `gamma_i`
+    is sampled from `sizes_jitter`. Similarly, the bin capacity is set to `B * beta`,
+    where `B` is the reference bin capacity and `beta` is sampled from `capacity_jitter`.
+    The number of items remains the same across all generated instances.
+
+    Args
+    ----
+    sizes_jitter
+        Probability distribution for the item size randomization.
+    capacity_jitter
+        Probability distribution for the bin capacity randomization.
+    """
+
+    def __init__(
+        self,
+        sizes_jitter: rv_frozen,
+        capacity_jitter: rv_frozen,
+    ) -> None:
+        self.sizes_jitter = sizes_jitter
+        self.capacity_jitter = capacity_jitter
+
+    def perturb(
+        self,
+        instance: BinPackData,
+        n_samples: int,
+    ) -> List[BinPackData]:
+        """Generates perturbed instances.
+
+        Parameters
+        ----------
+        instance
+            The reference instance to perturb.
+        n_samples
+            Number of samples to generate.
+        """
+
+        def _sample() -> BinPackData:
+            n = instance.sizes.shape[0]
+            sizes = instance.sizes * self.sizes_jitter.rvs(n)
+            capacity = instance.capacity * self.capacity_jitter.rvs()
+            return BinPackData(sizes.round(2), capacity.round(2))
+
+        return [_sample() for _ in range(n_samples)]


 def build_binpack_model_gurobipy(data: Union[str, BinPackData]) -> GurobiModel:
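With the jitter logic split out, instance generation becomes a two-step workflow: sample a reference instance, then perturb it. A hedged sketch with illustrative distribution parameters:

```python
from scipy.stats import randint, uniform

# Step 1: sample a reference bin packing instance.
gen = BinPackGenerator(
    n=randint(low=50, high=51),           # exactly 50 items
    sizes=uniform(loc=0, scale=25),       # item sizes in [0, 25)
    capacity=uniform(loc=100, scale=50),  # bin capacity in [100, 150)
)
reference = gen.generate(1)[0]

# Step 2: derive similar training instances from the reference.
perturber = BinPackPerturber(
    sizes_jitter=uniform(loc=0.9, scale=0.2),     # scale each size by [0.9, 1.1)
    capacity_jitter=uniform(loc=0.9, scale=0.2),  # scale capacity by [0.9, 1.1)
)
train_instances = perturber.perturb(reference, n_samples=10)
```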
174 miplearn/problems/maxcut.py (new file)
@@ -0,0 +1,174 @@
|
|||||||
|
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
|
||||||
|
# Copyright (C) 2020-2025, UChicago Argonne, LLC. All rights reserved.
|
||||||
|
# Released under the modified BSD license. See COPYING.md for more details.
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import List, Union, Optional, Any
|
||||||
|
|
||||||
|
import gurobipy as gp
|
||||||
|
import networkx as nx
|
||||||
|
import numpy as np
|
||||||
|
import pyomo.environ as pe
|
||||||
|
from networkx import Graph
|
||||||
|
from scipy.stats.distributions import rv_frozen
|
||||||
|
|
||||||
|
from miplearn.io import read_pkl_gz
|
||||||
|
from miplearn.problems import _gurobipy_set_params, _pyomo_set_params
|
||||||
|
from miplearn.solvers.gurobi import GurobiModel
|
||||||
|
from miplearn.solvers.pyomo import PyomoModel
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class MaxCutData:
|
||||||
|
graph: Graph
|
||||||
|
weights: np.ndarray
|
||||||
|
|
||||||
|
|
||||||
|
class MaxCutGenerator:
|
||||||
|
"""Random instance generator for the Maximum Cut Problem.
|
||||||
|
|
||||||
|
Generates instances by creating a new random Erdős-Rényi graph $G_{n,p}$ for each
|
||||||
|
instance, where $n$ and $p$ are sampled from user-provided probability distributions.
|
||||||
|
For each instance, the generator assigns random edge weights drawn from the set {-1, 1}
|
||||||
|
with equal probability.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
n: rv_frozen,
|
||||||
|
p: rv_frozen,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize the problem generator.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
n: rv_discrete
|
||||||
|
Probability distribution for the number of nodes.
|
||||||
|
p: rv_continuous
|
||||||
|
Probability distribution for the graph density.
|
||||||
|
"""
|
||||||
|
assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
|
||||||
|
assert isinstance(p, rv_frozen), "p should be a SciPy probability distribution"
|
||||||
|
self.n = n
|
||||||
|
self.p = p
|
||||||
|
|
||||||
|
def generate(self, n_samples: int) -> List[MaxCutData]:
|
||||||
|
def _sample() -> MaxCutData:
|
||||||
|
graph = self._generate_graph()
|
||||||
|
weights = self._generate_weights(graph)
|
||||||
|
return MaxCutData(graph, weights)
|
+        return [_sample() for _ in range(n_samples)]
+
+    def _generate_graph(self) -> Graph:
+        return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())
+
+    @staticmethod
+    def _generate_weights(graph: Graph) -> np.ndarray:
+        m = graph.number_of_edges()
+        return np.random.randint(2, size=(m,)) * 2 - 1
+
+
+class MaxCutPerturber:
+    """Perturbation generator for existing Maximum Cut instances.
+
+    Takes an existing MaxCutData instance and generates new instances by randomly
+    flipping the sign of each edge weight with a given probability while keeping
+    the graph structure fixed.
+    """
+
+    def __init__(
+        self,
+        w_jitter: float = 0.05,
+    ):
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        w_jitter: float
+            Probability that each edge weight flips sign (from -1 to 1 or vice versa).
+        """
+        assert 0.0 <= w_jitter <= 1.0, "w_jitter should be between 0.0 and 1.0"
+        self.w_jitter = w_jitter
+
+    def perturb(
+        self,
+        instance: MaxCutData,
+        n_samples: int,
+    ) -> List[MaxCutData]:
+        def _sample() -> MaxCutData:
+            jitter = self._generate_jitter(instance.graph)
+            weights = instance.weights * jitter
+            return MaxCutData(instance.graph, weights)
+
+        return [_sample() for _ in range(n_samples)]
+
+    def _generate_jitter(self, graph: Graph) -> np.ndarray:
+        m = graph.number_of_edges()
+        return (np.random.rand(m) >= self.w_jitter).astype(int) * 2 - 1
+
+
+def build_maxcut_model_gurobipy(
+    data: Union[str, MaxCutData],
+    params: Optional[dict[str, Any]] = None,
+) -> GurobiModel:
+    # Initialize model
+    model = gp.Model()
+    _gurobipy_set_params(model, params)
+
+    # Read data
+    data = _maxcut_read(data)
+    nodes = list(data.graph.nodes())
+    edges = list(data.graph.edges())
+
+    # Add decision variables
+    x = model.addVars(nodes, vtype=gp.GRB.BINARY, name="x")
+
+    # Add the objective function
+    model.setObjective(
+        gp.quicksum(
+            -data.weights[i] * x[e[0]] * (1 - x[e[1]]) for (i, e) in enumerate(edges)
+        )
+    )
+
+    model.update()
+    return GurobiModel(model)
+
+
+def build_maxcut_model_pyomo(
+    data: Union[str, MaxCutData],
+    solver: str = "gurobi_persistent",
+    params: Optional[dict[str, Any]] = None,
+) -> PyomoModel:
+    # Initialize model
+    model = pe.ConcreteModel()
+
+    # Read data
+    data = _maxcut_read(data)
+    nodes = pe.Set(initialize=list(data.graph.nodes))
+    edges = list(data.graph.edges())
+
+    # Add decision variables
+    model.x = pe.Var(nodes, domain=pe.Binary, name="x")
+
+    # Add the objective function
+    model.obj = pe.Objective(
+        expr=pe.quicksum(
+            -data.weights[i] * model.x[e[0]]
+            + data.weights[i] * model.x[e[0]] * model.x[e[1]]
+            for (i, e) in enumerate(edges)
+        ),
+        sense=pe.minimize,
+    )
+    model.pprint()
+    pm = PyomoModel(model, solver)
+    _pyomo_set_params(model, params, solver)
+    return pm
+
+
+def _maxcut_read(data: Union[str, MaxCutData]) -> MaxCutData:
+    if isinstance(data, str):
+        data = read_pkl_gz(data)
+    assert isinstance(data, MaxCutData)
+    return data
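The added lines above define a Maximum Cut perturber that flips edge-weight signs while keeping the graph fixed. A minimal usage sketch follows; the module path and the `MaxCutGenerator` constructor signature (distributions `n` and `p`, as its `_generate_graph` method suggests) are assumptions, not confirmed by this diff:

    from scipy.stats import randint, uniform
    # Assumed import path and generator name:
    from miplearn.problems.maxcut import MaxCutGenerator, MaxCutPerturber

    # One seed instance on a G(50, 0.25) random graph with +/-1 edge weights.
    gen = MaxCutGenerator(n=randint(low=50, high=51), p=uniform(loc=0.25, scale=0.0))
    seed = gen.generate(1)[0]

    # Ten variants that keep the graph fixed and flip each edge weight
    # (from -1 to +1 or vice versa) independently with probability 0.05.
    variants = MaxCutPerturber(w_jitter=0.05).perturb(seed, n_samples=10)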
@@ -3,7 +3,7 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from dataclasses import dataclass
-from typing import List, Optional, Union
+from typing import List, Optional, Union, Callable
 
 import gurobipy as gp
 import numpy as np
@@ -38,43 +38,19 @@ class MultiKnapsackData:
 class MultiKnapsackGenerator:
     """Random instance generator for the multi-dimensional knapsack problem.
 
-    Instances have a random number of items (or variables) and a random number of
-    knapsacks (or constraints), as specified by the provided probability
-    distributions `n` and `m`, respectively. The weight of each item `i` on knapsack
-    `j` is sampled independently from the provided distribution `w`. The capacity of
-    knapsack `j` is set to ``alpha_j * sum(w[i,j] for i in range(n))``,
-    where `alpha_j`, the tightness ratio, is sampled from the provided probability
-    distribution `alpha`.
-
-    To make the instances more challenging, the costs of the items are linearly
-    correlated to their average weights. More specifically, the weight of each item
-    `i` is set to ``sum(w[i,j]/m for j in range(m)) + K * u_i``, where `K`,
-    the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
-    from the provided probability distributions. Note that `K` is only sample once
-    for the entire instance.
-
-    If `fix_w=True`, then `weights[i,j]` are kept the same in all generated
-    instances. This also implies that n and m are kept fixed. Although the prices and
-    capacities are derived from `weights[i,j]`, as long as `u` and `K` are not
-    constants, the generated instances will still not be completely identical.
-
-    If a probability distribution `w_jitter` is provided, then item weights will be
-    set to ``w[i,j] * gamma[i,j]`` where `gamma[i,j]` is sampled from `w_jitter`.
-    When combined with `fix_w=True`, this argument may be used to generate instances
-    where the weight of each item is roughly the same, but not exactly identical,
-    across all instances. The prices of the items and the capacities of the knapsacks
-    will be calculated as above, but using these perturbed weights instead.
-
-    By default, all generated prices, weights and capacities are rounded to the
-    nearest integer number. If `round=False` is provided, this rounding will be
-    disabled.
+    Generates new instances by creating random items and knapsacks according to the
+    provided probability distributions. Each instance has a random number of items
+    (variables) and knapsacks (constraints), with weights, prices, and capacities
+    sampled independently.
 
     Parameters
     ----------
     n: rv_discrete
         Probability distribution for the number of items (or variables).
-    m: rv_discrete
-        Probability distribution for the number of knapsacks (or constraints).
+    m: rv_discrete or callable
+        Probability distribution for the number of knapsacks (or constraints), or a
+        callable that takes the number of items and returns the number of knapsacks
+        (e.g., lambda n: n//3).
     w: rv_continuous
         Probability distribution for the item weights.
     K: rv_continuous
@@ -83,11 +59,6 @@ class MultiKnapsackGenerator:
         Probability distribution for the profit multiplier.
     alpha: rv_continuous
         Probability distribution for the tightness ratio.
-    fix_w: boolean
-        If true, weights are kept the same (minus the noise from w_jitter) in all
-        instances.
-    w_jitter: rv_continuous
-        Probability distribution for random noise added to the weights.
     round: boolean
         If true, all prices, weights and capacities are rounded to the nearest
         integer.
@@ -96,28 +67,23 @@ class MultiKnapsackGenerator:
     def __init__(
         self,
         n: rv_frozen = randint(low=100, high=101),
-        m: rv_frozen = randint(low=30, high=31),
+        m: Union[rv_frozen, Callable] = randint(low=30, high=31),
         w: rv_frozen = randint(low=0, high=1000),
         K: rv_frozen = randint(low=500, high=501),
         u: rv_frozen = uniform(loc=0.0, scale=1.0),
         alpha: rv_frozen = uniform(loc=0.25, scale=0.0),
-        fix_w: bool = False,
-        w_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
-        p_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
         round: bool = True,
     ):
         assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
-        assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
+        assert isinstance(m, rv_frozen) or callable(
+            m
+        ), "m should be a SciPy probability distribution or callable"
         assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
         assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
         assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
         assert isinstance(
             alpha, rv_frozen
         ), "alpha should be a SciPy probability distribution"
-        assert isinstance(fix_w, bool), "fix_w should be boolean"
-        assert isinstance(
-            w_jitter, rv_frozen
-        ), "w_jitter should be a SciPy probability distribution"
 
         self.n = n
         self.m = m
@@ -125,45 +91,20 @@ class MultiKnapsackGenerator:
         self.u = u
         self.K = K
         self.alpha = alpha
-        self.w_jitter = w_jitter
-        self.p_jitter = p_jitter
         self.round = round
-        self.fix_n: Optional[int] = None
-        self.fix_m: Optional[int] = None
-        self.fix_w: Optional[np.ndarray] = None
-        self.fix_u: Optional[np.ndarray] = None
-        self.fix_K: Optional[float] = None
-
-        if fix_w:
-            self.fix_n = self.n.rvs()
-            self.fix_m = self.m.rvs()
-            self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
-            self.fix_u = self.u.rvs(self.fix_n)
-            self.fix_K = self.K.rvs()
 
     def generate(self, n_samples: int) -> List[MultiKnapsackData]:
        def _sample() -> MultiKnapsackData:
-            if self.fix_w is not None:
-                assert self.fix_m is not None
-                assert self.fix_n is not None
-                assert self.fix_u is not None
-                assert self.fix_K is not None
-                n = self.fix_n
-                m = self.fix_m
-                w = self.fix_w
-                u = self.fix_u
-                K = self.fix_K
+            n = self.n.rvs()
+            if callable(self.m):
+                m = self.m(n)
             else:
-                n = self.n.rvs()
                 m = self.m.rvs()
-                w = np.array([self.w.rvs(n) for _ in range(m)])
-                u = self.u.rvs(n)
-                K = self.K.rvs()
-            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
+            w = np.array([self.w.rvs(n) for _ in range(m)])
+            u = self.u.rvs(n)
+            K = self.K.rvs()
             alpha = self.alpha.rvs(m)
-            p = np.array(
-                [w[:, j].sum() / m + K * u[j] for j in range(n)]
-            ) * self.p_jitter.rvs(n)
+            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
             b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
             if self.round:
                 p = p.round()
@@ -174,6 +115,72 @@ class MultiKnapsackGenerator:
         return [_sample() for _ in range(n_samples)]
 
 
+class MultiKnapsackPerturber:
+    """Perturbation generator for existing multi-dimensional knapsack instances.
+
+    Takes an existing MultiKnapsackData instance and generates new instances by
+    applying randomization factors to the existing weights and prices while keeping
+    the structure (number of items and knapsacks) fixed.
+
+    Parameters
+    ----------
+    w_jitter: rv_continuous
+        Probability distribution for randomization factors applied to item weights.
+    p_jitter: rv_continuous
+        Probability distribution for randomization factors applied to item prices.
+    alpha_jitter: rv_continuous
+        Probability distribution for randomization factors applied to knapsack capacities.
+    round: boolean
+        If true, all perturbed prices, weights and capacities are rounded to the
+        nearest integer.
+    """
+
+    def __init__(
+        self,
+        w_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+        p_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+        alpha_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+        round: bool = True,
+    ):
+        assert isinstance(
+            w_jitter, rv_frozen
+        ), "w_jitter should be a SciPy probability distribution"
+        assert isinstance(
+            p_jitter, rv_frozen
+        ), "p_jitter should be a SciPy probability distribution"
+        assert isinstance(
+            alpha_jitter, rv_frozen
+        ), "alpha_jitter should be a SciPy probability distribution"
+        self.w_jitter = w_jitter
+        self.p_jitter = p_jitter
+        self.alpha_jitter = alpha_jitter
+        self.round = round
+
+    def perturb(
+        self,
+        instance: MultiKnapsackData,
+        n_samples: int,
+    ) -> List[MultiKnapsackData]:
+        def _sample() -> MultiKnapsackData:
+            m, n = instance.weights.shape
+            w_factors = np.array([self.w_jitter.rvs(n) for _ in range(m)])
+            p_factors = self.p_jitter.rvs(n)
+            alpha_factors = self.alpha_jitter.rvs(m)
+
+            w = instance.weights * w_factors
+            p = instance.prices * p_factors
+            b = instance.capacities * alpha_factors
+
+            if self.round:
+                p = p.round()
+                b = b.round()
+                w = w.round()
+            return MultiKnapsackData(p, b, w)
+
+        return [_sample() for _ in range(n_samples)]
+
+
 def build_multiknapsack_model_gurobipy(
     data: Union[str, MultiKnapsackData]
 ) -> GurobiModel:
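The multi-knapsack hunks above replace the old `fix_w`/`w_jitter` machinery with a stateless generator, whose `m` may now be a callable of `n`, plus a separate `MultiKnapsackPerturber`. A sketch of the new two-step workflow, assuming the module lives at `miplearn.problems.multiknapsack`:

    from scipy.stats import randint, uniform
    from miplearn.problems.multiknapsack import (  # assumed import path
        MultiKnapsackGenerator,
        MultiKnapsackPerturber,
    )

    # `m` as a callable of n: one knapsack per three items.
    seed = MultiKnapsackGenerator(
        n=randint(low=100, high=101),
        m=lambda n: n // 3,
    ).generate(1)[0]

    # The old fix_w=True behavior is now explicit: weights, prices and
    # capacities of the seed are each scaled by independent factors in [0.9, 1.1].
    variants = MultiKnapsackPerturber(
        w_jitter=uniform(loc=0.9, scale=0.2),
        p_jitter=uniform(loc=0.9, scale=0.2),
        alpha_jitter=uniform(loc=0.9, scale=0.2),
    ).perturb(seed, n_samples=50)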
@@ -3,7 +3,7 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from dataclasses import dataclass
-from typing import List, Optional, Union
+from typing import List, Optional, Union, Callable
 
 import gurobipy as gp
 import numpy as np
@@ -49,15 +49,6 @@ class PMedianGenerator:
     `demands` and `capacities`, respectively. Finally, the costs `w[i,j]` are set to
     the Euclidean distance between the locations of customers `i` and `j`.
 
-    If `fixed=True`, then the number of customers, their locations, the parameter
-    `p`, the demands and the capacities are only sampled from their respective
-    distributions exactly once, to build a reference instance which is then
-    perturbed. Specifically, for each perturbation, the distances, demands and
-    capacities are multiplied by factors sampled from the distributions
-    `distances_jitter`, `demands_jitter` and `capacities_jitter`, respectively. The
-    result is a list of instances that have the same set of customers, but slightly
-    different demands, capacities and distances.
-
     Parameters
     ----------
     x
@@ -67,19 +58,12 @@ class PMedianGenerator:
     n
         Probability distribution for the number of customers.
     p
-        Probability distribution for the number of medians.
+        Probability distribution for the number of medians, or a callable that takes
+        the number of customers and returns the number of medians (e.g., lambda n: n//10).
     demands
         Probability distribution for the customer demands.
     capacities
         Probability distribution for the facility capacities.
-    distances_jitter
-        Probability distribution for the random scaling factor applied to distances.
-    demands_jitter
-        Probability distribution for the random scaling factor applied to demands.
-    capacities_jitter
-        Probability distribution for the random scaling factor applied to capacities.
-    fixed
-        If `True`, then customer are kept the same across instances.
     """
 
     def __init__(
@@ -87,44 +71,41 @@ class PMedianGenerator:
         x: rv_frozen = uniform(loc=0.0, scale=100.0),
         y: rv_frozen = uniform(loc=0.0, scale=100.0),
         n: rv_frozen = randint(low=100, high=101),
-        p: rv_frozen = randint(low=10, high=11),
+        p: Union[rv_frozen, Callable] = randint(low=10, high=11),
         demands: rv_frozen = uniform(loc=0, scale=20),
         capacities: rv_frozen = uniform(loc=0, scale=100),
-        distances_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
-        demands_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
-        capacities_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
-        fixed: bool = True,
     ):
+        assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
+        assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
+        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
+        assert isinstance(p, rv_frozen) or callable(
+            p
+        ), "p should be a SciPy probability distribution or callable"
+        assert isinstance(
+            demands, rv_frozen
+        ), "demands should be a SciPy probability distribution"
+        assert isinstance(
+            capacities, rv_frozen
+        ), "capacities should be a SciPy probability distribution"
+
         self.x = x
         self.y = y
         self.n = n
         self.p = p
         self.demands = demands
         self.capacities = capacities
-        self.distances_jitter = distances_jitter
-        self.demands_jitter = demands_jitter
-        self.capacities_jitter = capacities_jitter
-        self.fixed = fixed
-        self.ref_data: Optional[PMedianData] = None
 
     def generate(self, n_samples: int) -> List[PMedianData]:
         def _sample() -> PMedianData:
-            if self.ref_data is None:
-                n = self.n.rvs()
-                p = self.p.rvs()
-                loc = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
-                distances = squareform(pdist(loc))
-                demands = self.demands.rvs(n)
-                capacities = self.capacities.rvs(n)
+            n = self.n.rvs()
+            if callable(self.p):
+                p = self.p(n)
             else:
-                n = self.ref_data.demands.shape[0]
-                distances = self.ref_data.distances * self.distances_jitter.rvs(
-                    size=(n, n)
-                )
-                distances = np.tril(distances) + np.triu(distances.T, 1)
-                demands = self.ref_data.demands * self.demands_jitter.rvs(n)
-                capacities = self.ref_data.capacities * self.capacities_jitter.rvs(n)
-                p = self.ref_data.p
+                p = self.p.rvs()
+            loc = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
+            distances = squareform(pdist(loc))
+            demands = self.demands.rvs(n)
+            capacities = self.capacities.rvs(n)
 
             data = PMedianData(
                 distances=distances.round(2),
@@ -133,14 +114,62 @@ class PMedianGenerator:
                 capacities=capacities.round(2),
             )
 
-            if self.fixed and self.ref_data is None:
-                self.ref_data = data
-
             return data
 
         return [_sample() for _ in range(n_samples)]
 
 
+class PMedianPerturber:
+    """Perturbation generator for existing p-median instances.
+
+    Takes an existing PMedianData instance and generates new instances by applying
+    randomization factors to the existing distances, demands, and capacities while
+    keeping the graph structure and parameter p fixed.
+    """
+
+    def __init__(
+        self,
+        distances_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
+        demands_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
+        capacities_jitter: rv_frozen = uniform(loc=1.0, scale=0.0),
+    ):
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        distances_jitter
+            Probability distribution for randomization factors applied to distances.
+        demands_jitter
+            Probability distribution for randomization factors applied to demands.
+        capacities_jitter
+            Probability distribution for randomization factors applied to capacities.
+        """
+        self.distances_jitter = distances_jitter
+        self.demands_jitter = demands_jitter
+        self.capacities_jitter = capacities_jitter
+
+    def perturb(
+        self,
+        instance: PMedianData,
+        n_samples: int,
+    ) -> List[PMedianData]:
+        def _sample() -> PMedianData:
+            n = instance.demands.shape[0]
+            distances = instance.distances * self.distances_jitter.rvs(size=(n, n))
+            distances = np.tril(distances) + np.triu(distances.T, 1)
+            demands = instance.demands * self.demands_jitter.rvs(n)
+            capacities = instance.capacities * self.capacities_jitter.rvs(n)
+
+            return PMedianData(
+                distances=distances.round(2),
+                demands=demands.round(2),
+                p=instance.p,
+                capacities=capacities.round(2),
+            )
+
+        return [_sample() for _ in range(n_samples)]
+
+
 def build_pmedian_model_gurobipy(data: Union[str, PMedianData]) -> GurobiModel:
     """Converts capacitated p-median data into a concrete Gurobipy model."""
     if isinstance(data, str):
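A usage sketch of the new p-median workflow, assuming the module lives at `miplearn.problems.pmedian`. Note that the perturber keeps the jitter defaults of the old `fixed=True` mode, uniform(loc=1.0, scale=0.0), which is the constant 1.0, so non-trivial distributions must be passed to get any actual variation:

    from scipy.stats import uniform
    from miplearn.problems.pmedian import (  # assumed import path
        PMedianGenerator,
        PMedianPerturber,
    )

    # `p` as a callable of n: one median per ten customers.
    seed = PMedianGenerator(p=lambda n: n // 10).generate(1)[0]

    # Scale distances, demands and capacities by factors in [0.95, 1.05],
    # keeping the customer set and parameter p of the seed fixed.
    variants = PMedianPerturber(
        distances_jitter=uniform(loc=0.95, scale=0.1),
        demands_jitter=uniform(loc=0.95, scale=0.1),
        capacities_jitter=uniform(loc=0.95, scale=0.1),
    ).perturb(seed, n_samples=20)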
@@ -3,12 +3,12 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from dataclasses import dataclass
-from typing import List, Union
+from typing import List, Union, Callable
 
 import gurobipy as gp
 import numpy as np
 import pyomo.environ as pe
-from gurobipy.gurobipy import GRB
+from gurobipy import GRB
 from scipy.stats import uniform, randint
 from scipy.stats.distributions import rv_frozen
 
@@ -24,56 +24,79 @@ class SetCoverData:
 
 
 class SetCoverGenerator:
+    """Random instance generator for the Set Cover Problem.
+
+    Generates instances by creating a new random incidence matrix for each
+    instance, where the number of elements, sets, density, and costs are sampled
+    from user-provided probability distributions.
+    """
+
     def __init__(
         self,
         n_elements: rv_frozen = randint(low=50, high=51),
-        n_sets: rv_frozen = randint(low=100, high=101),
+        n_sets: Union[rv_frozen, Callable] = randint(low=100, high=101),
         costs: rv_frozen = uniform(loc=0.0, scale=100.0),
-        costs_jitter: rv_frozen = uniform(loc=-5.0, scale=10.0),
         K: rv_frozen = uniform(loc=25.0, scale=0.0),
         density: rv_frozen = uniform(loc=0.02, scale=0.00),
-        fix_sets: bool = True,
     ):
+        """Initialize the problem generator.
+
+        Parameters
+        ----------
+        n_elements: rv_discrete
+            Probability distribution for number of elements.
+        n_sets: rv_discrete or callable
+            Probability distribution for number of sets, or a callable that takes
+            the number of elements and returns the number of sets.
+        costs: rv_continuous
+            Probability distribution for base set costs.
+        K: rv_continuous
+            Probability distribution for cost scaling factor based on set size.
+        density: rv_continuous
+            Probability distribution for incidence matrix density.
+        """
+        assert isinstance(
+            n_elements, rv_frozen
+        ), "n_elements should be a SciPy probability distribution"
+        assert isinstance(n_sets, rv_frozen) or callable(
+            n_sets
+        ), "n_sets should be a SciPy probability distribution or callable"
+        assert isinstance(
+            costs, rv_frozen
+        ), "costs should be a SciPy probability distribution"
+        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
+        assert isinstance(
+            density, rv_frozen
+        ), "density should be a SciPy probability distribution"
         self.n_elements = n_elements
         self.n_sets = n_sets
         self.costs = costs
-        self.costs_jitter = costs_jitter
         self.density = density
         self.K = K
-        self.fix_sets = fix_sets
-        self.fixed_costs = None
-        self.fixed_matrix = None
 
     def generate(self, n_samples: int) -> List[SetCoverData]:
         def _sample() -> SetCoverData:
-            if self.fixed_matrix is None:
-                n_sets = self.n_sets.rvs()
-                n_elements = self.n_elements.rvs()
-                density = self.density.rvs()
-
-                incidence_matrix = np.random.rand(n_elements, n_sets) < density
-                incidence_matrix = incidence_matrix.astype(int)
-
-                # Ensure each element belongs to at least one set
-                for j in range(n_elements):
-                    if incidence_matrix[j, :].sum() == 0:
-                        incidence_matrix[j, randint(low=0, high=n_sets).rvs()] = 1
-
-                # Ensure each set contains at least one element
-                for i in range(n_sets):
-                    if incidence_matrix[:, i].sum() == 0:
-                        incidence_matrix[randint(low=0, high=n_elements).rvs(), i] = 1
-
-                costs = self.costs.rvs(n_sets) + self.K.rvs() * incidence_matrix.sum(
-                    axis=0
-                )
-                if self.fix_sets:
-                    self.fixed_matrix = incidence_matrix
-                    self.fixed_costs = costs
+            n_elements = self.n_elements.rvs()
+            if callable(self.n_sets):
+                n_sets = self.n_sets(n_elements)
             else:
-                incidence_matrix = self.fixed_matrix
-                (_, n_sets) = incidence_matrix.shape
-                costs = self.fixed_costs * self.costs_jitter.rvs(n_sets)
+                n_sets = self.n_sets.rvs()
+            density = self.density.rvs()
+
+            incidence_matrix = np.random.rand(n_elements, n_sets) < density
+            incidence_matrix = incidence_matrix.astype(int)
+
+            # Ensure each element belongs to at least one set
+            for j in range(n_elements):
+                if incidence_matrix[j, :].sum() == 0:
+                    incidence_matrix[j, randint(low=0, high=n_sets).rvs()] = 1
+
+            # Ensure each set contains at least one element
+            for i in range(n_sets):
+                if incidence_matrix[:, i].sum() == 0:
+                    incidence_matrix[randint(low=0, high=n_elements).rvs(), i] = 1
+
+            costs = self.costs.rvs(n_sets) + self.K.rvs() * incidence_matrix.sum(axis=0)
             return SetCoverData(
                 costs=costs.round(2),
                 incidence_matrix=incidence_matrix,
@@ -82,6 +105,47 @@ class SetCoverGenerator:
         return [_sample() for _ in range(n_samples)]
 
 
+class SetCoverPerturber:
+    """Perturbation generator for existing Set Cover instances.
+
+    Takes an existing SetCoverData instance and generates new instances
+    by applying randomization factors to the existing costs while keeping the
+    incidence matrix fixed.
+    """
+
+    def __init__(
+        self,
+        costs_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+    ):
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        costs_jitter: rv_continuous
+            Probability distribution for randomization factors applied to set costs.
+        """
+        assert isinstance(
+            costs_jitter, rv_frozen
+        ), "costs_jitter should be a SciPy probability distribution"
+        self.costs_jitter = costs_jitter
+
+    def perturb(
+        self,
+        instance: SetCoverData,
+        n_samples: int,
+    ) -> List[SetCoverData]:
+        def _sample() -> SetCoverData:
+            (_, n_sets) = instance.incidence_matrix.shape
+            jitter_factors = self.costs_jitter.rvs(n_sets)
+            costs = np.round(instance.costs * jitter_factors, 2)
+            return SetCoverData(
+                costs=costs,
+                incidence_matrix=instance.incidence_matrix,
+            )
+
+        return [_sample() for _ in range(n_samples)]
+
+
 def build_setcover_model_gurobipy(data: Union[str, SetCoverData]) -> GurobiModel:
     data = _read_setcover_data(data)
     (n_elements, n_sets) = data.incidence_matrix.shape
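A sketch of the new set cover workflow, assuming the module lives at `miplearn.problems.setcover`; the callable form of `n_sets` keeps the set count proportional to the sampled element count:

    from scipy.stats import randint
    from miplearn.problems.setcover import (  # assumed import path
        SetCoverGenerator,
        SetCoverPerturber,
    )

    # `n_sets` as a callable: twice as many sets as elements, however many
    # elements end up being sampled for each instance.
    seed = SetCoverGenerator(
        n_elements=randint(low=50, high=201),
        n_sets=lambda n_elements: 2 * n_elements,
    ).generate(1)[0]

    # Scale every set cost by an independent factor in [0.9, 1.1] (the
    # default), keeping the incidence matrix of the seed instance fixed.
    variants = SetCoverPerturber().perturb(seed, n_samples=10)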
@@ -3,15 +3,15 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 from dataclasses import dataclass
-from typing import List, Union
+from typing import List, Union, Callable
 
 import gurobipy as gp
 import numpy as np
-from gurobipy.gurobipy import GRB
+from gurobipy import GRB
 from scipy.stats import uniform, randint
 from scipy.stats.distributions import rv_frozen
 
-from .setcover import SetCoverGenerator
+from .setcover import SetCoverGenerator, SetCoverPerturber
 from miplearn.solvers.gurobi import GurobiModel
 from ..io import read_pkl_gz
 
@@ -23,24 +23,56 @@ class SetPackData:
 
 
 class SetPackGenerator:
+    """Random instance generator for the Set Packing Problem.
+
+    Generates instances by creating a new random incidence matrix for each
+    instance, where the number of elements, sets, density, and costs are sampled
+    from user-provided probability distributions.
+    """
+
     def __init__(
         self,
         n_elements: rv_frozen = randint(low=50, high=51),
-        n_sets: rv_frozen = randint(low=100, high=101),
+        n_sets: Union[rv_frozen, Callable] = randint(low=100, high=101),
         costs: rv_frozen = uniform(loc=0.0, scale=100.0),
-        costs_jitter: rv_frozen = uniform(loc=-5.0, scale=10.0),
        K: rv_frozen = uniform(loc=25.0, scale=0.0),
         density: rv_frozen = uniform(loc=0.02, scale=0.00),
-        fix_sets: bool = True,
     ) -> None:
+        """Initialize the problem generator.
+
+        Parameters
+        ----------
+        n_elements: rv_discrete
+            Probability distribution for number of elements.
+        n_sets: rv_discrete or callable
+            Probability distribution for number of sets, or a callable that takes
+            the number of elements and returns the number of sets.
+        costs: rv_continuous
+            Probability distribution for base set costs.
+        K: rv_continuous
+            Probability distribution for cost scaling factor based on set size.
+        density: rv_continuous
+            Probability distribution for incidence matrix density.
+        """
+        assert isinstance(
+            n_elements, rv_frozen
+        ), "n_elements should be a SciPy probability distribution"
+        assert isinstance(n_sets, rv_frozen) or callable(
+            n_sets
+        ), "n_sets should be a SciPy probability distribution or callable"
+        assert isinstance(
+            costs, rv_frozen
+        ), "costs should be a SciPy probability distribution"
+        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
+        assert isinstance(
+            density, rv_frozen
+        ), "density should be a SciPy probability distribution"
         self.gen = SetCoverGenerator(
             n_elements=n_elements,
             n_sets=n_sets,
             costs=costs,
-            costs_jitter=costs_jitter,
             K=K,
             density=density,
-            fix_sets=fix_sets,
         )
 
     def generate(self, n_samples: int) -> List[SetPackData]:
@@ -53,6 +85,47 @@ class SetPackGenerator:
         ]
 
 
+class SetPackPerturber:
+    """Perturbation generator for existing Set Packing instances.
+
+    Takes an existing SetPackData instance and generates new instances
+    by applying randomization factors to the existing costs while keeping the
+    incidence matrix fixed.
+    """
+
+    def __init__(
+        self,
+        costs_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+    ):
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        costs_jitter: rv_continuous
+            Probability distribution for randomization factors applied to set costs.
+        """
+        assert isinstance(
+            costs_jitter, rv_frozen
+        ), "costs_jitter should be a SciPy probability distribution"
+        self.costs_jitter = costs_jitter
+
+    def perturb(
+        self,
+        instance: SetPackData,
+        n_samples: int,
+    ) -> List[SetPackData]:
+        def _sample() -> SetPackData:
+            (_, n_sets) = instance.incidence_matrix.shape
+            jitter_factors = self.costs_jitter.rvs(n_sets)
+            costs = np.round(instance.costs * jitter_factors, 2)
+            return SetPackData(
+                costs=costs,
+                incidence_matrix=instance.incidence_matrix,
+            )
+
+        return [_sample() for _ in range(n_samples)]
+
+
 def build_setpack_model_gurobipy(data: Union[str, SetPackData]) -> GurobiModel:
     if isinstance(data, str):
         data = read_pkl_gz(data)
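Since `SetPackGenerator` delegates instance construction to an internal `SetCoverGenerator` (the `self.gen` field above), it accepts the same callable `n_sets` argument. A brief sketch, with the import path assumed:

    from scipy.stats import uniform
    from miplearn.problems.setpack import (  # assumed import path
        SetPackGenerator,
        SetPackPerturber,
    )

    # Three sets per element, then 25 cost-perturbed copies of one seed
    # instance, with each set cost scaled by a factor in [0.8, 1.2].
    seed = SetPackGenerator(n_sets=lambda n_elements: 3 * n_elements).generate(1)[0]
    variants = SetPackPerturber(costs_jitter=uniform(loc=0.8, scale=0.4)).perturb(
        seed, n_samples=25
    )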
@@ -32,14 +32,10 @@ class MaxWeightStableSetData:
 class MaxWeightStableSetGenerator:
     """Random instance generator for the Maximum-Weight Stable Set Problem.
 
-    The generator has two modes of operation. When `fix_graph=True` is provided,
-    one random Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$
-    and $p$ are sampled from user-provided probability distributions `n` and `p`. To
-    generate each instance, the generator independently samples each $w_v$ from the
-    user-provided probability distribution `w`.
-
-    When `fix_graph=False`, a new random graph is generated for each instance; the
-    remaining parameters are sampled in the same way.
+    Generates instances by creating a new random Erdős-Rényi graph $G_{n,p}$ for each
+    instance, where $n$ and $p$ are sampled from user-provided probability distributions
+    `n` and `p`. For each instance, the generator independently samples each $w_v$ from
+    the user-provided probability distribution `w`.
     """
 
     def __init__(
@@ -47,7 +43,6 @@ class MaxWeightStableSetGenerator:
         w: rv_frozen = uniform(loc=10.0, scale=1.0),
         n: rv_frozen = randint(low=250, high=251),
         p: rv_frozen = uniform(loc=0.05, scale=0.0),
-        fix_graph: bool = True,
     ):
         """Initialize the problem generator.
 
@@ -66,17 +61,10 @@ class MaxWeightStableSetGenerator:
         self.w = w
         self.n = n
         self.p = p
-        self.fix_graph = fix_graph
-        self.graph = None
-        if fix_graph:
-            self.graph = self._generate_graph()
 
     def generate(self, n_samples: int) -> List[MaxWeightStableSetData]:
         def _sample() -> MaxWeightStableSetData:
-            if self.graph is not None:
-                graph = self.graph
-            else:
-                graph = self._generate_graph()
+            graph = self._generate_graph()
             weights = np.round(self.w.rvs(graph.number_of_nodes()), 2)
             return MaxWeightStableSetData(graph, weights)
 
@@ -86,6 +74,42 @@ class MaxWeightStableSetGenerator:
         return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())
 
 
+class MaxWeightStableSetPerturber:
+    """Perturbation generator for existing Maximum-Weight Stable Set instances.
+
+    Takes an existing MaxWeightStableSetData instance and generates new instances
+    by applying randomization factors to the existing weights while keeping the graph fixed.
+    """
+
+    def __init__(
+        self,
+        w_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+    ):
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        w_jitter: rv_continuous
+            Probability distribution for randomization factors applied to vertex weights.
+        """
+        assert isinstance(
+            w_jitter, rv_frozen
+        ), "w_jitter should be a SciPy probability distribution"
+        self.w_jitter = w_jitter
+
+    def perturb(
+        self,
+        instance: MaxWeightStableSetData,
+        n_samples: int,
+    ) -> List[MaxWeightStableSetData]:
+        def _sample() -> MaxWeightStableSetData:
+            jitter_factors = self.w_jitter.rvs(instance.graph.number_of_nodes())
+            weights = np.round(instance.weights * jitter_factors, 2)
+            return MaxWeightStableSetData(instance.graph, weights)
+
+        return [_sample() for _ in range(n_samples)]
+
+
 def build_stab_model_gurobipy(
     data: Union[str, MaxWeightStableSetData],
     params: Optional[dict[str, Any]] = None,
@@ -105,7 +129,8 @@ def build_stab_model_gurobipy(
             model.addConstr(x[i1] + x[i2] <= 1)
 
     def cuts_separate(m: GurobiModel) -> List[Hashable]:
-        x_val = m.inner.cbGetNodeRel(x)
+        x_val_dict = m.inner.cbGetNodeRel(x)
+        x_val = [x_val_dict[i] for i in nodes]
         return _stab_separate(data, x_val)
 
     def cuts_enforce(m: GurobiModel, violations: List[Any]) -> None:
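Two things change in the stable set file above: the `fix_graph` mode is replaced by an explicit perturber, and `cuts_separate` now converts the node-keyed mapping returned by `cbGetNodeRel` into a plain list ordered like `nodes`, which appears to be what `_stab_separate` expects. A usage sketch of the new two-step workflow, with the import path assumed:

    from scipy.stats import randint, uniform
    from miplearn.problems.stab import (  # assumed import path
        MaxWeightStableSetGenerator,
        MaxWeightStableSetPerturber,
    )

    # A fixed-graph family is now built in two explicit steps: generate one
    # seed instance, then rescale its vertex weights by factors in [0.9, 1.1].
    seed = MaxWeightStableSetGenerator(
        w=uniform(loc=10.0, scale=1.0),
        n=randint(low=250, high=251),
    ).generate(1)[0]
    variants = MaxWeightStableSetPerturber(
        w_jitter=uniform(loc=0.9, scale=0.2)
    ).perturb(seed, n_samples=100)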
@@ -27,10 +27,21 @@ logger = logging.getLogger(__name__)
 class TravelingSalesmanData:
     n_cities: int
     distances: np.ndarray
+    cities: np.ndarray
 
 
 class TravelingSalesmanGenerator:
-    """Random generator for the Traveling Salesman Problem."""
+    """Random instance generator for the Traveling Salesman Problem.
+
+    Generates instances by creating n cities (x_1,y_1),...,(x_n,y_n) where n,
+    x_i and y_i are sampled independently from the provided probability
+    distributions `n`, `x` and `y`. For each (unordered) pair of cities (i,j),
+    the distance d[i,j] between them is set to:
+
+        d[i,j] = gamma[i,j] \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
+
+    where gamma is sampled from the provided probability distribution `gamma`.
+    """
 
     def __init__(
         self,
@@ -38,27 +49,10 @@ class TravelingSalesmanGenerator:
         y: rv_frozen = uniform(loc=0.0, scale=1000.0),
         n: rv_frozen = randint(low=100, high=101),
         gamma: rv_frozen = uniform(loc=1.0, scale=0.0),
-        fix_cities: bool = True,
         round: bool = True,
     ) -> None:
         """Initializes the problem generator.
 
-        Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n,
-        x_i and y_i are sampled independently from the provided probability
-        distributions `n`, `x` and `y`. For each (unordered) pair of cities (i,j),
-        the distance d[i,j] between them is set to:
-
-            d[i,j] = gamma[i,j] \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
-
-        where gamma is sampled from the provided probability distribution `gamma`.
-
-        If fix_cities=True, the list of cities is kept the same for all generated
-        instances. The gamma values, and therefore also the distances, are still
-        different.
-
-        By default, all distances d[i,j] are rounded to the nearest integer. If
-        `round=False` is provided, this rounding will be disabled.
-
         Arguments
         ---------
         x: rv_continuous
@@ -67,9 +61,8 @@ class TravelingSalesmanGenerator:
             Probability distribution for the y-coordinate of each city.
         n: rv_discrete
             Probability distribution for the number of cities.
-        fix_cities: bool
-            If False, cities will be resampled for every generated instance. Otherwise, list
-            of cities will be computed once, during the constructor.
+        gamma: rv_continuous
+            Probability distribution for distance perturbation factors.
         round: bool
             If True, distances are rounded to the nearest integer.
         """
@@ -86,26 +79,11 @@ class TravelingSalesmanGenerator:
         self.gamma = gamma
         self.round = round
 
-        if fix_cities:
-            self.fixed_n: Optional[int]
-            self.fixed_cities: Optional[np.ndarray]
-            self.fixed_n, self.fixed_cities = self._generate_cities()
-        else:
-            self.fixed_n = None
-            self.fixed_cities = None
-
     def generate(self, n_samples: int) -> List[TravelingSalesmanData]:
         def _sample() -> TravelingSalesmanData:
-            if self.fixed_cities is not None:
-                assert self.fixed_n is not None
-                n, cities = self.fixed_n, self.fixed_cities
-            else:
-                n, cities = self._generate_cities()
-            distances = squareform(pdist(cities)) * self.gamma.rvs(size=(n, n))
-            distances = np.tril(distances) + np.triu(distances.T, 1)
-            if self.round:
-                distances = distances.round()
-            return TravelingSalesmanData(n, distances)
+            n, cities = self._generate_cities()
+            distances = self._compute_distances(cities, self.gamma, self.round)
+            return TravelingSalesmanData(n, distances, cities)
 
         return [_sample() for _ in range(n_samples)]
 
@@ -114,6 +92,62 @@ class TravelingSalesmanGenerator:
         cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
         return n, cities
 
+    @staticmethod
+    def _compute_distances(
+        cities: np.ndarray, gamma: rv_frozen, round: bool
+    ) -> np.ndarray:
+        n = len(cities)
+        distances = squareform(pdist(cities)) * gamma.rvs(size=(n, n))
+        distances = np.tril(distances) + np.triu(distances.T, 1)
+        if round:
+            distances = distances.round()
+        return distances
+
+
+class TravelingSalesmanPerturber:
+    """Perturbation generator for existing Traveling Salesman Problem instances.
+
+    Takes an existing TravelingSalesmanData instance and generates new instances
+    by applying randomization factors to the distances computed from the original cities.
+    """
+
+    def __init__(
+        self,
+        gamma: rv_frozen = uniform(loc=1.0, scale=0.0),
+        round: bool = True,
+    ) -> None:
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        gamma: rv_continuous
+            Probability distribution for randomization factors applied to distances.
+        round: bool
+            If True, perturbed distances are rounded to the nearest integer.
+        """
+        assert isinstance(
+            gamma, rv_frozen
+        ), "gamma should be a SciPy probability distribution"
+        self.gamma = gamma
+        self.round = round
+
+    def perturb(
+        self,
+        instance: TravelingSalesmanData,
+        n_samples: int,
+    ) -> List[TravelingSalesmanData]:
+        def _sample() -> TravelingSalesmanData:
+            new_distances = TravelingSalesmanGenerator._compute_distances(
+                instance.cities,
+                self.gamma,
+                self.round,
+            )
+            return TravelingSalesmanData(
+                instance.n_cities, new_distances, instance.cities
+            )
+
+        return [_sample() for _ in range(n_samples)]
+
+
 def build_tsp_model_gurobipy(
     data: Union[str, TravelingSalesmanData],
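Because `TravelingSalesmanData` now carries `cities`, the perturber can recompute distances from the original coordinates with fresh gamma factors for every sample. A brief sketch, with the import path assumed:

    from scipy.stats import uniform
    from miplearn.problems.tsp import (  # assumed import path
        TravelingSalesmanGenerator,
        TravelingSalesmanPerturber,
    )

    seed = TravelingSalesmanGenerator().generate(1)[0]

    # Ten variants that keep the seed's city coordinates but rescale every
    # pairwise distance by an independent factor drawn from [0.95, 1.05].
    variants = TravelingSalesmanPerturber(
        gamma=uniform(loc=0.95, scale=0.1)
    ).perturb(seed, n_samples=10)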
@@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from math import pi
|
from math import pi
|
||||||
from typing import List, Optional, Union
|
from typing import List, Optional, Union, Callable
|
||||||
|
|
||||||
import gurobipy as gp
|
import gurobipy as gp
|
||||||
import numpy as np
|
import numpy as np
|
||||||
@@ -25,75 +25,102 @@ class UnitCommitmentData:
|
|||||||
min_downtime: np.ndarray
|
min_downtime: np.ndarray
|
||||||
cost_startup: np.ndarray
|
cost_startup: np.ndarray
|
||||||
cost_prod: np.ndarray
|
cost_prod: np.ndarray
|
||||||
|
cost_prod_quad: np.ndarray
|
||||||
cost_fixed: np.ndarray
|
cost_fixed: np.ndarray
|
||||||
|
|
||||||
|
|
||||||
class UnitCommitmentGenerator:
|
class UnitCommitmentGenerator:
|
||||||
|
"""Random instance generator for the Unit Commitment Problem.
|
||||||
|
|
||||||
|
Generates instances by creating new random unit commitment problems with
|
||||||
|
parameters sampled from user-provided probability distributions.
|
||||||
|
"""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
n_units: rv_frozen = randint(low=1_000, high=1_001),
|
n_units: rv_frozen = randint(low=1_000, high=1_001),
|
||||||
n_periods: rv_frozen = randint(low=72, high=73),
|
n_periods: Union[rv_frozen, Callable] = randint(low=72, high=73),
|
||||||
max_power: rv_frozen = uniform(loc=50, scale=450),
|
max_power: rv_frozen = uniform(loc=50, scale=450),
|
||||||
min_power: rv_frozen = uniform(loc=0.5, scale=0.25),
|
min_power: rv_frozen = uniform(loc=0.5, scale=0.25),
|
||||||
cost_startup: rv_frozen = uniform(loc=0, scale=10_000),
|
cost_startup: rv_frozen = uniform(loc=0, scale=10_000),
|
||||||
cost_prod: rv_frozen = uniform(loc=0, scale=50),
|
cost_prod: rv_frozen = uniform(loc=0, scale=50),
|
||||||
|
cost_prod_quad: rv_frozen = uniform(loc=0, scale=0),
|
||||||
cost_fixed: rv_frozen = uniform(loc=0, scale=1_000),
|
cost_fixed: rv_frozen = uniform(loc=0, scale=1_000),
|
||||||
min_uptime: rv_frozen = randint(low=2, high=8),
|
min_uptime: rv_frozen = randint(low=2, high=8),
|
||||||
min_downtime: rv_frozen = randint(low=2, high=8),
|
min_downtime: rv_frozen = randint(low=2, high=8),
|
||||||
cost_jitter: rv_frozen = uniform(loc=0.75, scale=0.5),
|
|
||||||
demand_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
|
|
||||||
fix_units: bool = False,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
|
"""Initialize the problem generator.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
n_units: rv_frozen
|
||||||
|
Probability distribution for number of units.
|
||||||
|
n_periods: rv_frozen or callable
|
||||||
|
Probability distribution for number of periods, or a callable that takes
|
||||||
|
the number of units and returns the number of periods.
|
||||||
|
max_power: rv_frozen
|
||||||
|
Probability distribution for maximum power output.
|
||||||
|
min_power: rv_frozen
|
||||||
|
Probability distribution for minimum power output (as fraction of max_power).
|
||||||
|
cost_startup: rv_frozen
|
||||||
|
Probability distribution for startup costs.
|
||||||
|
cost_prod: rv_frozen
|
||||||
|
Probability distribution for production costs.
|
||||||
|
cost_prod_quad: rv_frozen
|
||||||
|
Probability distribution for quadratic production costs.
|
||||||
|
cost_fixed: rv_frozen
|
||||||
|
Probability distribution for fixed costs.
|
||||||
|
min_uptime: rv_frozen
|
||||||
|
Probability distribution for minimum uptime.
|
||||||
|
min_downtime: rv_frozen
|
||||||
|
Probability distribution for minimum downtime.
|
||||||
|
"""
|
||||||
|
assert isinstance(
|
||||||
|
n_units, rv_frozen
|
||||||
|
), "n_units should be a SciPy probability distribution"
|
||||||
|
assert isinstance(n_periods, rv_frozen) or callable(
|
||||||
|
n_periods
|
||||||
|
), "n_periods should be a SciPy probability distribution or callable"
|
||||||
self.n_units = n_units
|
self.n_units = n_units
|
||||||
self.n_periods = n_periods
|
self.n_periods = n_periods
|
||||||
self.max_power = max_power
|
self.max_power = max_power
|
||||||
self.min_power = min_power
|
self.min_power = min_power
|
||||||
self.cost_startup = cost_startup
|
self.cost_startup = cost_startup
|
||||||
self.cost_prod = cost_prod
|
self.cost_prod = cost_prod
|
||||||
|
self.cost_prod_quad = cost_prod_quad
|
||||||
self.cost_fixed = cost_fixed
|
self.cost_fixed = cost_fixed
|
||||||
         self.min_uptime = min_uptime
         self.min_downtime = min_downtime
-        self.cost_jitter = cost_jitter
-        self.demand_jitter = demand_jitter
-        self.fix_units = fix_units
-        self.ref_data: Optional[UnitCommitmentData] = None

     def generate(self, n_samples: int) -> List[UnitCommitmentData]:
         def _sample() -> UnitCommitmentData:
-            if self.ref_data is None:
-                G = self.n_units.rvs()
-                T = self.n_periods.rvs()
-
-                # Generate unit parameters
-                max_power = self.max_power.rvs(G)
-                min_power = max_power * self.min_power.rvs(G)
-                max_power = max_power
-                min_uptime = self.min_uptime.rvs(G)
-                min_downtime = self.min_downtime.rvs(G)
-                cost_startup = self.cost_startup.rvs(G)
-                cost_prod = self.cost_prod.rvs(G)
-                cost_fixed = self.cost_fixed.rvs(G)
-                capacity = max_power.sum()
-
-                # Generate periodic demand in the range [0.4, 0.8] * capacity, with a peak every 12 hours.
-                demand = np.sin([i / 6 * pi for i in range(T)])
-                demand *= uniform(loc=0, scale=1).rvs(T)
-                demand -= demand.min()
-                demand /= demand.max() / 0.4
-                demand += 0.4
-                demand *= capacity
-            else:
-                T, G = len(self.ref_data.demand), len(self.ref_data.max_power)
-                demand = self.ref_data.demand * self.demand_jitter.rvs(T)
-                min_power = self.ref_data.min_power
-                max_power = self.ref_data.max_power
-                min_uptime = self.ref_data.min_uptime
-                min_downtime = self.ref_data.min_downtime
-                cost_startup = self.ref_data.cost_startup * self.cost_jitter.rvs(G)
-                cost_prod = self.ref_data.cost_prod * self.cost_jitter.rvs(G)
-                cost_fixed = self.ref_data.cost_fixed * self.cost_jitter.rvs(G)
-
-            data = UnitCommitmentData(
+            G = self.n_units.rvs()
+            if callable(self.n_periods):
+                T = self.n_periods(G)
+            else:
+                T = self.n_periods.rvs()
+
+            # Generate unit parameters
+            max_power = self.max_power.rvs(G)
+            min_power = max_power * self.min_power.rvs(G)
+            max_power = max_power
+            min_uptime = self.min_uptime.rvs(G)
+            min_downtime = self.min_downtime.rvs(G)
+            cost_startup = self.cost_startup.rvs(G)
+            cost_prod = self.cost_prod.rvs(G)
+            cost_prod_quad = self.cost_prod_quad.rvs(G)
+            cost_fixed = self.cost_fixed.rvs(G)
+            capacity = max_power.sum()
+
+            # Generate periodic demand in the range [0.4, 0.8] * capacity, with a peak every 12 hours.
+            demand = np.sin([i / 6 * pi for i in range(T)])
+            demand *= uniform(loc=0, scale=1).rvs(T)
+            demand -= demand.min()
+            demand /= demand.max() / 0.4
+            demand += 0.4
+            demand *= capacity
+
+            return UnitCommitmentData(
                 demand.round(2),
                 min_power.round(2),
                 max_power.round(2),
@@ -101,13 +128,68 @@ class UnitCommitmentGenerator:
                 min_uptime,
                 min_downtime,
                 cost_startup.round(2),
                 cost_prod.round(2),
+                cost_prod_quad.round(4),
                 cost_fixed.round(2),
             )

-            if self.ref_data is None and self.fix_units:
-                self.ref_data = data
-
-            return data
-
         return [_sample() for _ in range(n_samples)]
+
+
+class UnitCommitmentPerturber:
+    """Perturbation generator for existing Unit Commitment instances.
+
+    Takes an existing UnitCommitmentData instance and generates new instances
+    by applying randomization factors to the existing costs and demand while
+    keeping the unit structure fixed.
+    """
+
+    def __init__(
+        self,
+        cost_jitter: rv_frozen = uniform(loc=0.75, scale=0.5),
+        demand_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+    ) -> None:
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        cost_jitter: rv_frozen
+            Probability distribution for randomization factors applied to costs.
+        demand_jitter: rv_frozen
+            Probability distribution for randomization factors applied to demand.
+        """
+        assert isinstance(
+            cost_jitter, rv_frozen
+        ), "cost_jitter should be a SciPy probability distribution"
+        assert isinstance(
+            demand_jitter, rv_frozen
+        ), "demand_jitter should be a SciPy probability distribution"
+        self.cost_jitter = cost_jitter
+        self.demand_jitter = demand_jitter
+
+    def perturb(
+        self,
+        instance: UnitCommitmentData,
+        n_samples: int,
+    ) -> List[UnitCommitmentData]:
+        def _sample() -> UnitCommitmentData:
+            T, G = len(instance.demand), len(instance.max_power)
+            demand = instance.demand * self.demand_jitter.rvs(T)
+            cost_startup = instance.cost_startup * self.cost_jitter.rvs(G)
+            cost_prod = instance.cost_prod * self.cost_jitter.rvs(G)
+            cost_prod_quad = instance.cost_prod_quad * self.cost_jitter.rvs(G)
+            cost_fixed = instance.cost_fixed * self.cost_jitter.rvs(G)
+
+            return UnitCommitmentData(
+                demand.round(2),
+                instance.min_power,
+                instance.max_power,
+                instance.min_uptime,
+                instance.min_downtime,
+                cost_startup.round(2),
+                cost_prod.round(2),
+                cost_prod_quad.round(4),
+                cost_fixed.round(2),
+            )
+
+        return [_sample() for _ in range(n_samples)]
@@ -143,6 +225,7 @@ def build_uc_model_gurobipy(data: Union[str, UnitCommitmentData]) -> GurobiModel
             is_on[g, t] * data.cost_fixed[g]
             + switch_on[g, t] * data.cost_startup[g]
             + prod[g, t] * data.cost_prod[g]
+            + prod[g, t] * prod[g, t] * data.cost_prod_quad[g]
             for g in range(G)
             for t in range(T)
         )
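The perturber added above replaces the old `fix_units`/`cost_jitter` flags on the generator: correlated instances are now produced by generating one reference instance and then perturbing it. A minimal usage sketch, assuming `UnitCommitmentGenerator` provides defaults for all of its distribution parameters and lives in `miplearn.problems.uc` (both assumptions, not shown in this diff):

```python
from scipy.stats import uniform

# Hypothetical import path, following the package layout used elsewhere:
from miplearn.problems.uc import UnitCommitmentGenerator, UnitCommitmentPerturber

gen = UnitCommitmentGenerator()  # assumes constructor defaults exist
reference = gen.generate(1)[0]

# Same units, jittered costs and demand, as implemented in perturb() above.
pr = UnitCommitmentPerturber(
    cost_jitter=uniform(loc=0.75, scale=0.5),
    demand_jitter=uniform(loc=0.9, scale=0.2),
)
instances = pr.perturb(reference, n_samples=3)
assert len(instances) == 3
```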
@@ -12,7 +12,11 @@ from networkx import Graph
 from scipy.stats import uniform, randint
 from scipy.stats.distributions import rv_frozen

-from .stab import MaxWeightStableSetGenerator
+from .stab import (
+    MaxWeightStableSetGenerator,
+    MaxWeightStableSetPerturber,
+    MaxWeightStableSetData,
+)
 from miplearn.solvers.gurobi import GurobiModel
 from ..io import read_pkl_gz

@@ -24,14 +28,35 @@ class MinWeightVertexCoverData:


 class MinWeightVertexCoverGenerator:
+    """Random instance generator for the Minimum-Weight Vertex Cover Problem.
+
+    Generates instances by creating a new random Erdős-Rényi graph $G_{n,p}$ for each
+    instance, where $n$ and $p$ are sampled from user-provided probability distributions
+    `n` and `p`. For each instance, the generator independently samples each $w_v$ from
+    the user-provided probability distribution `w`.
+    """
+
     def __init__(
         self,
         w: rv_frozen = uniform(loc=10.0, scale=1.0),
         n: rv_frozen = randint(low=250, high=251),
         p: rv_frozen = uniform(loc=0.05, scale=0.0),
-        fix_graph: bool = True,
     ):
-        self._generator = MaxWeightStableSetGenerator(w, n, p, fix_graph)
+        """Initialize the problem generator.
+
+        Parameters
+        ----------
+        w: rv_continuous
+            Probability distribution for vertex weights.
+        n: rv_discrete
+            Probability distribution for parameter $n$ in Erdős-Rényi model.
+        p: rv_continuous
+            Probability distribution for parameter $p$ in Erdős-Rényi model.
+        """
+        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
+        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
+        assert isinstance(p, rv_frozen), "p should be a SciPy probability distribution"
+        self._generator = MaxWeightStableSetGenerator(w, n, p)

     def generate(self, n_samples: int) -> List[MinWeightVertexCoverData]:
         return [
@@ -40,6 +65,38 @@ class MinWeightVertexCoverGenerator:
         ]


+class MinWeightVertexCoverPerturber:
+    """Perturbation generator for existing Minimum-Weight Vertex Cover instances.
+
+    Takes an existing MinWeightVertexCoverData instance and generates new instances
+    by applying randomization factors to the existing weights while keeping the
+    graph fixed.
+    """
+
+    def __init__(
+        self,
+        w_jitter: rv_frozen = uniform(loc=0.9, scale=0.2),
+    ):
+        """Initialize the perturbation generator.
+
+        Parameters
+        ----------
+        w_jitter: rv_continuous
+            Probability distribution for randomization factors applied to vertex weights.
+        """
+        self._perturber = MaxWeightStableSetPerturber(w_jitter)
+
+    def perturb(
+        self,
+        instance: MinWeightVertexCoverData,
+        n_samples: int,
+    ) -> List[MinWeightVertexCoverData]:
+        stab_instance = MaxWeightStableSetData(instance.graph, instance.weights)
+        perturbed_instances = self._perturber.perturb(stab_instance, n_samples)
+        return [
+            MinWeightVertexCoverData(s.graph, s.weights) for s in perturbed_instances
+        ]
+
+
 def build_vertexcover_model_gurobipy(
     data: Union[str, MinWeightVertexCoverData]
 ) -> GurobiModel:
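The same generate-then-perturb pattern applies here, with the vertex cover perturber delegating to the stable set one. A sketch under the defaults shown in the diff (module path `miplearn.problems.vertexcover` is an assumption):

```python
from scipy.stats import uniform

from miplearn.problems.vertexcover import (
    MinWeightVertexCoverGenerator,
    MinWeightVertexCoverPerturber,
)

gen = MinWeightVertexCoverGenerator()  # defaults above: n=250, p=0.05
base = gen.generate(1)[0]

pr = MinWeightVertexCoverPerturber(w_jitter=uniform(loc=0.9, scale=0.2))
variants = pr.perturb(base, n_samples=5)  # same graph, jittered weights
```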
@@ -4,10 +4,10 @@
 import logging
 import json
-from typing import Dict, Optional, Callable, Any, List
+from typing import Dict, Optional, Callable, Any, List, Sequence

 import gurobipy as gp
-from gurobipy import GRB, GurobiError
+from gurobipy import GRB, GurobiError, Var
 import numpy as np
 from scipy.sparse import lil_matrix

@@ -109,7 +109,11 @@ class GurobiModel(AbstractModel):
         assert constrs_sense.shape == (nconstrs,)
         assert constrs_rhs.shape == (nconstrs,)

-        gp_vars = [self.inner.getVarByName(var_name.decode()) for var_name in var_names]
+        gp_vars: list[Var] = []
+        for var_name in var_names:
+            v = self.inner.getVarByName(var_name.decode())
+            assert v is not None, f"unknown var: {var_name}"
+            gp_vars.append(v)
         self.inner.addMConstr(constrs_lhs, gp_vars, constrs_sense, constrs_rhs)

         if stats is not None:
@@ -188,9 +192,10 @@ class GurobiModel(AbstractModel):
             var_val = var_values[var_idx]
             if np.isfinite(var_val):
                 var = self.inner.getVarByName(var_name.decode())
-                var.vtype = "C"
-                var.lb = var_val
-                var.ub = var_val
+                assert var is not None, f"unknown var: {var_name}"
+                var.VType = "c"
+                var.LB = var_val
+                var.UB = var_val
                 n_fixed += 1
         if stats is not None:
             stats["Fixed variables"] = n_fixed
@@ -213,7 +218,7 @@ class GurobiModel(AbstractModel):
         return GurobiModel(self.inner.relax())

     def set_time_limit(self, time_limit_sec: float) -> None:
-        self.inner.params.timeLimit = time_limit_sec
+        self.inner.params.TimeLimit = time_limit_sec

     def set_warm_starts(
         self,
@@ -228,12 +233,13 @@ class GurobiModel(AbstractModel):

         self.inner.numStart = n_starts
         for start_idx in range(n_starts):
-            self.inner.params.startNumber = start_idx
+            self.inner.params.StartNumber = start_idx
             for var_idx, var_name in enumerate(var_names):
                 var_val = var_values[start_idx, var_idx]
                 if np.isfinite(var_val):
                     var = self.inner.getVarByName(var_name.decode())
-                    var.start = var_val
+                    assert var is not None, f"unknown var: {var_name}"
+                    var.Start = var_val

         if stats is not None:
             stats["WS: Count"] = n_starts
@@ -258,6 +264,13 @@ class GurobiModel(AbstractModel):
             h5.put_array(
                 h5_field, np.array(self.inner.getAttr(gp_field, gp_vars), dtype=float)
             )
+        obj = self.inner.getObjective()
+        if isinstance(obj, gp.QuadExpr):
+            nvars = len(self.inner.getVars())
+            obj_q = np.zeros((nvars, nvars))
+            for i in range(obj.size()):
+                obj_q[obj.getVar1(i).index, obj.getVar2(i).index] = obj.getCoeff(i)
+            h5.put_array("static_var_obj_coeffs_quad", obj_q)

     def _extract_after_load_constrs(self, h5: H5File) -> None:
         gp_constrs = self.inner.getConstrs()
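The new block stores a dense matrix whose (i, j) entry is the coefficient of x_i * x_j in the objective. A standalone sketch of the same bookkeeping on a toy gurobipy model (illustrative model; requires a Gurobi license to run):

```python
import gurobipy as gp
import numpy as np
from gurobipy import GRB

m = gp.Model()
x = m.addVar(name="x")
y = m.addVar(name="y")
m.setObjective(x * y + 2 * y * y + 3 * x, GRB.MINIMIZE)
m.update()  # ensures Var.index is populated

obj = m.getObjective()
assert isinstance(obj, gp.QuadExpr)
nvars = len(m.getVars())
obj_q = np.zeros((nvars, nvars))
for i in range(obj.size()):
    # size()/getVar1()/getVar2()/getCoeff() enumerate quadratic terms only;
    # the linear part is available separately via obj.getLinExpr().
    obj_q[obj.getVar1(i).index, obj.getVar2(i).index] = obj.getCoeff(i)

print(obj_q)  # [[0., 1.], [0., 2.]]
```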
@@ -3,7 +3,7 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 from os.path import exists
 from tempfile import NamedTemporaryFile
-from typing import List, Any, Union, Dict, Callable, Optional
+from typing import List, Any, Union, Dict, Callable, Optional, Tuple

 from miplearn.h5 import H5File
 from miplearn.io import _to_h5_filename
@@ -25,7 +25,7 @@ class LearningSolver:
         self,
         model: Union[str, AbstractModel],
         build_model: Optional[Callable] = None,
-    ) -> Dict[str, Any]:
+    ) -> Tuple[AbstractModel, Dict[str, Any]]:
         h5_filename, mode = NamedTemporaryFile().name, "w"
         if isinstance(model, str):
             assert build_model is not None
@@ -47,8 +47,10 @@ class LearningSolver:
             relaxed.optimize()
             relaxed.extract_after_lp(h5)
             for comp in self.components:
-                comp.before_mip(h5_filename, model, stats)
+                comp_stats = comp.before_mip(h5_filename, model, stats)
+                if comp_stats is not None:
+                    stats.update(comp_stats)
             model.optimize()
             model.extract_after_mip(h5)

-        return stats
+        return model, stats
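Since `optimize` now returns the model together with the statistics dictionary (the 0.4.2 change noted in the changelog), call sites must unpack a pair; the test updates further down do exactly this. A sketch of a wrapper around the new signature (the `build_model` argument is a user-supplied callable, as elsewhere in the package):

```python
from typing import Any, Callable, Dict, Tuple

from miplearn.solvers.abstract import AbstractModel
from miplearn.solvers.learning import LearningSolver


def solve_one(
    data_filename: str,
    build_model: Callable,
) -> Tuple[AbstractModel, Dict[str, Any]]:
    # optimize() now returns (model, stats) instead of just stats.
    solver = LearningSolver(components=[])
    model, stats = solver.optimize(data_filename, build_model)
    return model, stats
```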
@@ -8,7 +8,8 @@ import numpy as np
 import pyomo
 import pyomo.environ as pe
 from pyomo.core import Objective, Var, Suffix
-from pyomo.core.base import _GeneralVarData
+from pyomo.core.base import VarData
+from pyomo.core.expr import ProductExpression
 from pyomo.core.expr.numeric_expr import SumExpression, MonomialTermExpression
 from scipy.sparse import coo_matrix

@@ -207,19 +208,23 @@ class PyomoModel(AbstractModel):
         lower_bounds: List[float] = []
         obj_coeffs: List[float] = []

-        obj = None
+        obj_quad, obj_linear = None, None
         obj_offset = 0.0
         obj_count = 0
         for obj in self.inner.component_objects(Objective):
-            obj, obj_offset = self._parse_pyomo_expr(obj.expr)
+            obj_quad, obj_linear, obj_offset = self._parse_obj_expr(obj.expr)
             obj_count += 1
         assert obj_count == 1, f"One objective function expected; found {obj_count}"
+        assert obj_quad is not None
+        assert obj_linear is not None

+        varname_to_idx: Dict[str, int] = {}
         for i, var in enumerate(self.inner.component_objects(pyomo.core.Var)):
             for idx in var:
                 v = var[idx]

                 # Variable name
+                varname_to_idx[v.name] = len(names)
                 if idx is None:
                     names.append(var.name)
                 else:
@@ -249,11 +254,22 @@ class PyomoModel(AbstractModel):
                 lower_bounds.append(float(lb))

                 # Objective coefficients
-                if v.name in obj:
-                    obj_coeffs.append(obj[v.name])
+                if v.name in obj_linear:
+                    obj_coeffs.append(obj_linear[v.name])
                 else:
                     obj_coeffs.append(0.0)

+        if len(obj_quad) > 0:
+            nvars = len(names)
+            matrix = np.zeros((nvars, nvars))
+            for (left_varname, right_varname), coeff in obj_quad.items():
+                assert left_varname in varname_to_idx
+                assert right_varname in varname_to_idx
+                left_idx = varname_to_idx[left_varname]
+                right_idx = varname_to_idx[right_varname]
+                matrix[left_idx, right_idx] = coeff
+            h5.put_array("static_var_obj_coeffs_quad", matrix)
+
         h5.put_array("static_var_names", np.array(names, dtype="S"))
         h5.put_array("static_var_types", np.array(types, dtype="S"))
         h5.put_array("static_var_lower_bounds", np.array(lower_bounds))
@@ -302,13 +318,13 @@ class PyomoModel(AbstractModel):
                     lhs_row.append(row)
                     lhs_col.append(varname_to_idx[term._args_[1].name])
                     lhs_data.append(float(term._args_[0]))
-                elif isinstance(term, _GeneralVarData):
+                elif isinstance(term, VarData):
                     lhs_row.append(row)
                     lhs_col.append(varname_to_idx[term.name])
                     lhs_data.append(1.0)
                 else:
                     raise Exception(f"Unknown term type: {term.__class__.__name__}")
-        elif isinstance(expr, _GeneralVarData):
+        elif isinstance(expr, VarData):
             lhs_row.append(row)
             lhs_col.append(varname_to_idx[expr.name])
             lhs_data.append(1.0)
@@ -327,8 +343,9 @@ class PyomoModel(AbstractModel):
             _parse_constraint(constr, curr_row)
             curr_row += 1

-        lhs = coo_matrix((lhs_data, (lhs_row, lhs_col))).tocoo()
-        h5.put_sparse("static_constr_lhs", lhs)
+        if len(lhs_data) > 0:
+            lhs = coo_matrix((lhs_data, (lhs_row, lhs_col))).tocoo()
+            h5.put_sparse("static_constr_lhs", lhs)
         h5.put_array("static_constr_names", np.array(names, dtype="S"))
         h5.put_array("static_constr_rhs", np.array(rhs))
         h5.put_array("static_constr_sense", np.array(senses, dtype="S"))
@@ -372,24 +389,47 @@ class PyomoModel(AbstractModel):
             slacks.append(abs(self.inner.slack[c]))
         h5.put_array("mip_constr_slacks", np.array(slacks))

-    def _parse_pyomo_expr(self, expr: Any) -> Tuple[Dict[str, float], float]:
-        lhs = {}
-        offset = 0.0
-        if isinstance(expr, SumExpression):
-            for term in expr._args_:
-                if isinstance(term, MonomialTermExpression):
-                    lhs[term._args_[1].name] = float(term._args_[0])
-                elif isinstance(term, _GeneralVarData):
-                    lhs[term.name] = 1.0
-                elif isinstance(term, float):
-                    offset += term
-                else:
-                    raise Exception(f"Unknown term type: {term.__class__.__name__}")
-        elif isinstance(expr, _GeneralVarData):
-            lhs[expr.name] = 1.0
-        else:
-            raise Exception(f"Unknown expression type: {expr.__class__.__name__}")
-        return lhs, offset
+    def _parse_term(self, t: Any) -> Tuple[str, float]:
+        if isinstance(t, MonomialTermExpression):
+            return t._args_[1].name, float(t._args_[0])
+        elif isinstance(t, VarData):
+            return t.name, 1.0
+        else:
+            raise Exception(f"Unknown term type: {t.__class__.__name__}")
+
+    def _parse_obj_expr(
+        self, expr: Any
+    ) -> Tuple[Dict[Tuple[str, str], float], Dict[str, float], float]:
+        obj_coeff_linear = {}
+        obj_coeff_quadratic = {}
+        obj_offset = 0.0
+        if isinstance(expr, SumExpression):
+            for term in expr._args_:
+                if isinstance(term, (int, float)):
+                    # Constant term
+                    obj_offset += term
+                elif isinstance(term, (MonomialTermExpression, VarData)):
+                    # Linear term
+                    var_name, var_coeff = self._parse_term(term)
+                    if var_name not in obj_coeff_linear:
+                        obj_coeff_linear[var_name] = 0.0
+                    obj_coeff_linear[var_name] += var_coeff
+                elif isinstance(term, ProductExpression):
+                    # Quadratic terms
+                    left_var_name, left_coeff = self._parse_term(term._args_[0])
+                    right_var_name, right_coeff = self._parse_term(term._args_[1])
+                    if (left_var_name, right_var_name) not in obj_coeff_quadratic:
+                        obj_coeff_quadratic[(left_var_name, right_var_name)] = 0.0
+                    obj_coeff_quadratic[(left_var_name, right_var_name)] += (
+                        left_coeff * right_coeff
+                    )
+                else:
+                    raise Exception(f"Unknown term type: {term.__class__.__name__}")
+        elif isinstance(expr, VarData):
+            obj_coeff_linear[expr.name] = 1.0
+        else:
+            raise Exception(f"Unknown expression type: {expr.__class__.__name__}")
+        return obj_coeff_quadratic, obj_coeff_linear, obj_offset

     def _gap(self, zp: float, zd: float, tol: float = 1e-6) -> float:
         # Reference: https://www.gurobi.com/documentation/9.5/refman/mipgap2.html
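What the new parser is meant to produce, on a small Pyomo objective (a sketch; expression-tree details vary across Pyomo versions, which is exactly why the diff switches from `_GeneralVarData` to the public `VarData` name):

```python
import pyomo.environ as pe

m = pe.ConcreteModel()
m.x = pe.Var()
m.y = pe.Var()
m.obj = pe.Objective(expr=2.0 * m.x + m.x * m.y + 5.0)

# Intended result of _parse_obj_expr(m.obj.expr), per the code above:
#   obj_coeff_quadratic == {("x", "y"): 1.0}
#   obj_coeff_linear    == {"x": 2.0}
#   obj_offset          == 5.0
```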

18 setup.py
@@ -6,7 +6,7 @@ from setuptools import setup, find_namespace_packages
 setup(
     name="miplearn",
-    version="0.4.0",
+    version="0.4.3",
     author="Alinson S. Xavier",
     author_email="axavier@anl.gov",
     description="Extensible Framework for Learning-Enhanced Mixed-Integer Optimization",
@@ -14,12 +14,11 @@ setup(
     packages=find_namespace_packages(),
     python_requires=">=3.9",
     install_requires=[
-        "Jinja2<3.1",
-        "gurobipy>=10,<11",
+        "gurobipy>=12,<13",
         "h5py>=3,<4",
         "networkx>=2,<3",
         "numpy>=1,<2",
-        "pandas>=1,<2",
+        "pandas>=2,<3",
         "pathos>=0.2,<0.3",
         "pyomo>=6,<7",
         "scikit-learn>=1,<2",
@@ -28,16 +27,17 @@ setup(
     ],
     extras_require={
         "dev": [
-            "Sphinx>=3,<4",
+            "Sphinx>=8,<9",
             "black==22.6.0",
             "mypy==1.8",
-            "myst-parser==0.14.0",
+            "myst-parser>=4,<5",
             "nbsphinx>=0.9,<0.10",
             "pyflakes==2.5.0",
             "pytest>=7,<8",
-            "sphinx-book-theme==0.1.0",
-            "sphinx-multitoc-numbering>=0.1,<0.2",
-            "twine>=4,<5",
+            "sphinx-book-theme>=1,<2",
+            "sphinx-multitoc-numbering==0.1.3",
+            "twine>=6,<7",
+            "ipython>=9,<10",
         ]
     },
 )

@@ -28,17 +28,17 @@ def test_mem_component_gp(
     clf.fit.assert_called()
     x, y = clf.fit.call_args.args
     assert x.shape == (3, 50)
-    assert y.shape == (3, 412)
+    assert y.shape == (3, 382)
     y = y.tolist()
     assert y[0][40:50] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-    assert y[1][40:50] == [1, 1, 0, 1, 1, 1, 1, 1, 1, 1]
-    assert y[2][40:50] == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+    assert y[1][40:50] == [1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
+    assert y[2][40:50] == [1, 1, 1, 1, 0, 0, 1, 1, 1, 1]

     # Should store violations
     assert comp.constrs_ is not None
     assert comp.n_features_ == 50
-    assert comp.n_targets_ == 412
-    assert len(comp.constrs_) == 412
+    assert comp.n_targets_ == 382
+    assert len(comp.constrs_) == 382

     # Call before-mip
     stats: Dict[str, Any] = {}
@@ -54,7 +54,7 @@ def test_mem_component_gp(
     model.set_cuts.assert_called()
     (cuts_aot_,) = model.set_cuts.call_args.args
     assert cuts_aot_ is not None
-    assert len(cuts_aot_) == 256
+    assert len(cuts_aot_) == 247


 def test_usage_stab(
@@ -71,5 +71,5 @@ def test_usage_stab(
     comp = MemorizingCutsComponent(clf=clf, extractor=default_extractor)
     solver = LearningSolver(components=[comp])
     solver.fit(data_filenames)
-    stats = solver.optimize(data_filenames[0], build_model)  # type: ignore
+    model, stats = solver.optimize(data_filenames[0], build_model)  # type: ignore
     assert stats["Cuts: AOT"] > 0

@@ -22,7 +22,7 @@ def test_mem_component(
     for h5 in [tsp_gp_h5, tsp_pyo_h5]:
         clf = Mock(wraps=DummyClassifier())
         comp = MemorizingLazyComponent(clf=clf, extractor=default_extractor)
-        comp.fit(tsp_gp_h5)
+        comp.fit(h5)

         # Should call fit method with correct arguments
         clf.fit.assert_called()
@@ -43,7 +43,7 @@ def test_mem_component(
         # Call before-mip
         stats: Dict[str, Any] = {}
         model = Mock()
-        comp.before_mip(tsp_gp_h5[0], model, stats)
+        comp.before_mip(h5[0], model, stats)

         # Should call predict with correct args
         clf.predict.assert_called()
@@ -65,5 +65,5 @@ def test_usage_tsp(
     comp = MemorizingLazyComponent(clf=clf, extractor=default_extractor)
     solver = LearningSolver(components=[comp])
     solver.fit(data_filenames)
-    stats = solver.optimize(data_filenames[0], build_model)  # type: ignore
+    model, stats = solver.optimize(data_filenames[0], build_model)  # type: ignore
     assert stats["Lazy Constraints: AOT"] > 0

@@ -4,7 +4,7 @@
 import os
 import shutil
 import tempfile
-from glob import glob
+from glob import glob, escape
 from os.path import dirname, basename, isfile
 from tempfile import NamedTemporaryFile
 from typing import List, Any
@@ -21,8 +21,8 @@ def _h5_fixture(pattern: str, request: Any) -> List[str]:
     .pkl.gz files, and return the path to the copy. Also register a finalizer,
     so that the temporary folder is removed after the tests.
     """
-    filenames = glob(f"{dirname(__file__)}/fixtures/{pattern}")
-    print(filenames)
+    fixtures_dir = escape(os.path.join(dirname(__file__), "fixtures"))
+    filenames = glob(os.path.join(fixtures_dir, pattern))
     tmpdir = tempfile.mkdtemp()

     def cleanup() -> None:
@@ -30,7 +30,6 @@ def _h5_fixture(pattern: str, request: Any) -> List[str]:

     request.addfinalizer(cleanup)

-    print(tmpdir)
     for f in filenames:
         fbase, _ = os.path.splitext(f)
         for ext in [".h5", ".pkl.gz"]:
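The switch to `glob.escape` matters when the checkout path itself contains glob metacharacters (`[`, `]`, `?`, `*`): only the pattern argument should be interpreted. A sketch of the difference (directory name hypothetical):

```python
import os
from glob import glob, escape

base = "/tmp/build[1]/fixtures"  # hypothetical path containing "[" and "]"
pattern = "stab-gp-n50-*.h5"

# Unescaped, "[1]" is parsed as a character class and the lookup silently
# matches nothing; escaping the directory part leaves only "*" active.
filenames = glob(os.path.join(escape(base), pattern))
```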

8 tests/fixtures/gen_stab.py (vendored)
@@ -7,6 +7,7 @@ from miplearn.collectors.basic import BasicCollector
 from miplearn.io import write_pkl_gz
 from miplearn.problems.stab import (
     MaxWeightStableSetGenerator,
+    MaxWeightStableSetPerturber,
     build_stab_model_gurobipy,
     build_stab_model_pyomo,
 )
@@ -17,9 +18,12 @@ gen = MaxWeightStableSetGenerator(
     w=uniform(10.0, scale=1.0),
     n=randint(low=50, high=51),
     p=uniform(loc=0.5, scale=0.0),
-    fix_graph=True,
 )
-data = gen.generate(3)
+pr = MaxWeightStableSetPerturber(
+    w_jitter=uniform(0.9, scale=0.2),
+)
+base_instance = gen.generate(1)[0]
+data = pr.perturb(base_instance, 3)

 params = {"seed": 42, "threads": 1}

14 tests/fixtures/gen_tsp.py (vendored)
@@ -7,6 +7,7 @@ from miplearn.collectors.basic import BasicCollector
 from miplearn.io import write_pkl_gz
 from miplearn.problems.tsp import (
     TravelingSalesmanGenerator,
+    TravelingSalesmanPerturber,
     build_tsp_model_gurobipy,
     build_tsp_model_pyomo,
 )
@@ -16,12 +17,19 @@ gen = TravelingSalesmanGenerator(
     x=uniform(loc=0.0, scale=1000.0),
     y=uniform(loc=0.0, scale=1000.0),
     n=randint(low=20, high=21),
-    gamma=uniform(loc=1.0, scale=0.25),
-    fix_cities=True,
+    gamma=uniform(loc=1.0, scale=0.0),
     round=True,
 )

-data = gen.generate(3)
+# Generate a reference instance with fixed cities
+reference_instance = gen.generate(1)[0]
+
+# Generate perturbed instances with same cities but different distance scaling
+perturber = TravelingSalesmanPerturber(
+    gamma=uniform(loc=1.0, scale=0.25),
+    round=True,
+)
+data = perturber.perturb(reference_instance, 3)

 params = {"seed": 42, "threads": 1}

BIN tests/fixtures/stab-gp-n50-00000.h5 (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00000.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00000.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00001.h5 (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00001.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00001.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00002.h5 (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00002.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-gp-n50-00002.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00000.h5 (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00000.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00000.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00001.h5 (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00001.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00001.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00002.h5 (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00002.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/stab-pyo-n50-00002.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00000.h5 (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00000.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00000.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00001.h5 (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00001.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00001.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00002.h5 (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00002.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-gp-n20-00002.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00000.h5 (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00000.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00000.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00001.h5 (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00001.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00001.pkl.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00002.h5 (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00002.mps.gz (vendored, binary file not shown)
BIN tests/fixtures/tsp-pyo-n20-00002.pkl.gz (vendored, binary file not shown)
@@ -18,37 +18,34 @@ def test_binpack_generator() -> None:
         n=randint(low=10, high=11),
         sizes=uniform(loc=0, scale=10),
         capacity=uniform(loc=100, scale=0),
-        sizes_jitter=uniform(loc=0.9, scale=0.2),
-        capacity_jitter=uniform(loc=0.9, scale=0.2),
-        fix_items=True,
     )
     data = gen.generate(2)
     assert data[0].sizes.tolist() == [
-        3.39,
-        10.4,
-        7.81,
-        5.64,
-        1.46,
-        1.46,
-        0.56,
-        8.7,
-        5.93,
-        6.79,
-    ]
-    assert data[0].capacity == 102.24
-    assert data[1].sizes.tolist() == [
-        3.48,
-        9.11,
-        7.12,
-        5.93,
-        1.65,
-        1.47,
+        3.75,
+        9.51,
+        7.32,
+        5.99,
+        1.56,
+        1.56,
         0.58,
-        8.82,
-        5.47,
-        7.23,
+        8.66,
+        6.01,
+        7.08,
     ]
-    assert data[1].capacity == 93.41
+    assert data[0].capacity == 100.0
+    assert data[1].sizes.tolist() == [
+        0.21,
+        9.7,
+        8.32,
+        2.12,
+        1.82,
+        1.83,
+        3.04,
+        5.25,
+        4.32,
+        2.91,
+    ]
+    assert data[1].capacity == 100.0


 def test_binpack() -> None:

89 tests/problems/test_maxcut.py (new file)
@@ -0,0 +1,89 @@
+# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
+# Copyright (C) 2020-2025, UChicago Argonne, LLC. All rights reserved.
+# Released under the modified BSD license. See COPYING.md for more details.
+
+import random
+from tempfile import TemporaryDirectory
+
+import numpy as np
+from scipy.stats import randint, uniform
+
+from miplearn.h5 import H5File
+from miplearn.problems.maxcut import (
+    MaxCutGenerator,
+    build_maxcut_model_gurobipy,
+    build_maxcut_model_pyomo,
+)
+from miplearn.solvers.abstract import AbstractModel
+
+
+def _set_seed() -> None:
+    random.seed(42)
+    np.random.seed(42)
+
+
+def test_maxcut_generator() -> None:
+    _set_seed()
+    gen = MaxCutGenerator(
+        n=randint(low=5, high=6),
+        p=uniform(loc=0.5, scale=0.0),
+    )
+    data = gen.generate(3)
+    assert len(data) == 3
+    assert list(data[0].graph.nodes()) == [0, 1, 2, 3, 4]
+    assert list(data[0].graph.edges()) == [
+        (0, 2),
+        (0, 3),
+        (0, 4),
+        (2, 3),
+        (2, 4),
+        (3, 4),
+    ]
+    assert data[0].weights.tolist() == [-1, 1, -1, -1, -1, 1]
+
+
+def test_maxcut_model() -> None:
+    _set_seed()
+    data = MaxCutGenerator(
+        n=randint(low=10, high=11),
+        p=uniform(loc=0.5, scale=0.0),
+    ).generate(1)[0]
+    for model in [
+        build_maxcut_model_gurobipy(data),
+        build_maxcut_model_pyomo(data),
+    ]:
+        assert isinstance(model, AbstractModel)
+        with TemporaryDirectory() as tempdir:
+            with H5File(f"{tempdir}/data.h5", "w") as h5:
+                model.extract_after_load(h5)
+                obj_lin = h5.get_array("static_var_obj_coeffs")
+                assert obj_lin is not None
+                assert obj_lin.tolist() == [
+                    3.0,
+                    1.0,
+                    3.0,
+                    1.0,
+                    -1.0,
+                    0.0,
+                    -1.0,
+                    0.0,
+                    -1.0,
+                    0.0,
+                ]
+                obj_quad = h5.get_array("static_var_obj_coeffs_quad")
+                assert obj_quad is not None
+                assert obj_quad.tolist() == [
+                    [0.0, 0.0, -1.0, 1.0, -1.0, 0.0, 0.0, 0.0, -1.0, -1.0],
+                    [0.0, 0.0, 1.0, -1.0, 0.0, -1.0, -1.0, 0.0, 0.0, 1.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, -1.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 1.0, -1.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
+                    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                ]
+                model.optimize()
+                model.extract_after_mip(h5)
+                assert h5.get_scalar("mip_obj_value") == -4
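Together, the two arrays checked above fully describe a quadratic objective: with `c` the linear coefficients and `Q` the quadratic matrix, a candidate assignment `x` evaluates to `c @ x + x @ Q @ x`, up to any constant offset. A sketch of reading them back (file path hypothetical; assumes `H5File` accepts read mode as in h5py):

```python
import numpy as np

from miplearn.h5 import H5File

with H5File("data.h5", "r") as h5:  # hypothetical file written as in the test
    c = h5.get_array("static_var_obj_coeffs")
    Q = h5.get_array("static_var_obj_coeffs_quad")

x = np.ones(len(c))               # any candidate 0/1 assignment
value = float(c @ x + x @ Q @ x)  # objective value, up to a constant offset
```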
@@ -21,27 +21,30 @@ def test_knapsack_generator() -> None:
         K=randint(low=500, high=501),
         u=uniform(loc=0.0, scale=1.0),
         alpha=uniform(loc=0.25, scale=0.0),
-        fix_w=True,
-        w_jitter=uniform(loc=0.9, scale=0.2),
-        p_jitter=uniform(loc=0.9, scale=0.2),
-        round=True,
     )
-    data = gen.generate(2)
-    assert data[0].prices.tolist() == [433.0, 477.0, 802.0, 494.0, 458.0]
-    assert data[0].capacities.tolist() == [458.0, 357.0, 392.0]
+    data = gen.generate(1)
+    assert data[0].prices.tolist() == [380.0, 521.0, 729.0, 476.0, 466.0]
+    assert data[0].capacities.tolist() == [443.0, 382.0, 389.0]
     assert data[0].weights.tolist() == [
-        [111.0, 392.0, 945.0, 276.0, 108.0],
-        [64.0, 633.0, 20.0, 602.0, 110.0],
-        [510.0, 203.0, 303.0, 469.0, 85.0],
+        [102, 435, 860, 270, 106],
+        [71, 700, 20, 614, 121],
+        [466, 214, 330, 458, 87],
     ]

-    assert data[1].prices.tolist() == [344.0, 527.0, 658.0, 519.0, 460.0]
-    assert data[1].capacities.tolist() == [449.0, 377.0, 380.0]
-    assert data[1].weights.tolist() == [
-        [92.0, 473.0, 871.0, 264.0, 96.0],
-        [67.0, 664.0, 21.0, 628.0, 129.0],
-        [436.0, 209.0, 309.0, 481.0, 86.0],
-    ]
+
+def test_knapsack_generator_callable() -> None:
+    np.random.seed(42)
+    gen = MultiKnapsackGenerator(
+        n=randint(low=10, high=11),
+        m=lambda n: n // 3,
+        w=randint(low=0, high=1000),
+        K=randint(low=500, high=501),
+        u=uniform(loc=0.0, scale=1.0),
+        alpha=uniform(loc=0.25, scale=0.0),
+    )
+    data = gen.generate(1)[0]
+    assert data.weights.shape[1] == 10
+    assert data.weights.shape[0] == 3


 def test_knapsack_model() -> None:
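This test exercises a convention that several generators now share (`MultiKnapsackGenerator`, `PMedianGenerator`, `SetCoverGenerator`, `SetPackGenerator`, and the UC generator above): a dependent size parameter may be either a frozen SciPy distribution or a callable that receives the sampled primary dimension. A sketch of the dispatch pattern, mirroring the `callable(self.n_periods)` branch in the uc.py diff (the `resolve` helper is illustrative, not part of the package):

```python
from typing import Callable, Union

from scipy.stats import randint
from scipy.stats.distributions import rv_frozen


def resolve(param: Union[rv_frozen, Callable[[int], int]], n: int) -> int:
    # Callables derive the dependent dimension from n deterministically;
    # frozen distributions are sampled independently of n.
    return param(n) if callable(param) else param.rvs()


assert resolve(lambda n: n // 3, 10) == 3
assert 5 <= resolve(randint(low=5, high=8), 10) <= 7
```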
@@ -17,12 +17,8 @@ def test_pmedian() -> None:
         p=randint(low=2, high=3),
         demands=uniform(loc=0, scale=20),
         capacities=uniform(loc=0, scale=100),
-        distances_jitter=uniform(loc=0.95, scale=0.1),
-        demands_jitter=uniform(loc=0.95, scale=0.1),
-        capacities_jitter=uniform(loc=0.95, scale=0.1),
-        fixed=True,
     )
-    data = gen.generate(2)
+    data = gen.generate(1)

     assert data[0].p == 2
     assert data[0].demands.tolist() == [0.41, 19.4, 16.65, 4.25, 3.64]
@@ -35,19 +31,23 @@ def test_pmedian() -> None:
         [33.2, 17.06, 70.92, 56.56, 0.0],
     ]

-    assert data[1].p == 2
-    assert data[1].demands.tolist() == [0.42, 19.03, 16.68, 4.27, 3.53]
-    assert data[1].capacities.tolist() == [19.2, 31.26, 54.79, 44.9, 29.41]
-    assert data[1].distances.tolist() == [
-        [0.0, 51.6, 83.31, 33.77, 31.95],
-        [51.6, 0.0, 70.25, 71.09, 17.05],
-        [83.31, 70.25, 0.0, 68.81, 67.62],
-        [33.77, 71.09, 68.81, 0.0, 58.88],
-        [31.95, 17.05, 67.62, 58.88, 0.0],
-    ]
-
     model = build_pmedian_model_gurobipy(data[0])
     assert model.inner.numVars == 30
     assert model.inner.numConstrs == 11
     model.optimize()
     assert round(model.inner.objVal) == 107
+
+
+def test_pmedian_generator_callable() -> None:
+    np.random.seed(42)
+    gen = PMedianGenerator(
+        x=uniform(loc=0.0, scale=100.0),
+        y=uniform(loc=0.0, scale=100.0),
+        n=randint(low=10, high=11),
+        p=lambda n: n // 5,
+        demands=uniform(loc=0, scale=20),
+        capacities=uniform(loc=0, scale=100),
+    )
+    data = gen.generate(1)
+    assert data[0].p == 2
+    assert len(data[0].demands) == 10

@@ -2,7 +2,7 @@
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.

-from tempfile import NamedTemporaryFile
+from tempfile import TemporaryDirectory

 import numpy as np
 from scipy.stats import randint, uniform
@@ -23,51 +23,32 @@ def test_set_cover_generator() -> None:
         n_elements=randint(low=3, high=4),
         n_sets=randint(low=5, high=6),
         costs=uniform(loc=0.0, scale=100.0),
-        costs_jitter=uniform(loc=0.95, scale=0.10),
         density=uniform(loc=0.5, scale=0),
         K=uniform(loc=25, scale=0),
-        fix_sets=False,
     )
-    data = gen.generate(2)
+    data = gen.generate(1)

     assert data[0].costs.round(1).tolist() == [136.8, 86.2, 25.7, 27.3, 102.5]
     assert data[0].incidence_matrix.tolist() == [
         [1, 0, 1, 0, 1],
         [1, 1, 0, 0, 0],
         [1, 0, 0, 1, 1],
     ]
-    assert data[1].costs.round(1).tolist() == [63.5, 76.6, 48.1, 74.1, 93.3]
-    assert data[1].incidence_matrix.tolist() == [
-        [1, 1, 0, 1, 1],
-        [0, 1, 0, 1, 0],
-        [0, 1, 1, 0, 0],
-    ]


-def test_set_cover_generator_with_fixed_sets() -> None:
+def test_set_cover_generator_callable() -> None:
     np.random.seed(42)
     gen = SetCoverGenerator(
-        n_elements=randint(low=3, high=4),
-        n_sets=randint(low=5, high=6),
-        costs=uniform(loc=0.0, scale=100.0),
-        costs_jitter=uniform(loc=0.95, scale=0.10),
-        density=uniform(loc=0.5, scale=0.00),
-        fix_sets=True,
+        n_elements=randint(low=4, high=5),
+        n_sets=lambda n: n * 2,
+        costs=uniform(loc=10.0, scale=0.0),
+        density=uniform(loc=0.5, scale=0),
+        K=uniform(loc=0, scale=0),
     )
-    data = gen.generate(3)
-
-    assert data[0].costs.tolist() == [136.75, 86.17, 25.71, 27.31, 102.48]
-    assert data[1].costs.tolist() == [135.38, 82.26, 26.92, 26.58, 98.28]
-    assert data[2].costs.tolist() == [138.37, 85.15, 26.95, 27.22, 106.17]
-
-    print(data[0].incidence_matrix)
-
-    for i in range(3):
-        assert data[i].incidence_matrix.tolist() == [
-            [1, 0, 1, 0, 1],
-            [1, 1, 0, 0, 0],
-            [1, 0, 0, 1, 1],
-        ]
+    data = gen.generate(1)
+    n_elements, n_sets = data[0].incidence_matrix.shape
+    assert n_elements == 4
+    assert n_sets == 8
+    assert len(data[0].costs) == 8


 def test_set_cover() -> None:
@@ -86,8 +67,8 @@ def test_set_cover() -> None:
         build_setcover_model_gurobipy(data),
     ]:
         assert isinstance(model, AbstractModel)
-        with NamedTemporaryFile() as tempfile:
-            with H5File(tempfile.name) as h5:
+        with TemporaryDirectory() as tempdir:
+            with H5File(f"{tempdir}/data.h5", "w") as h5:
                 model.optimize()
                 model.extract_after_mip(h5)
                 assert h5.get_scalar("mip_obj_value") == 11.0

@@ -3,9 +3,11 @@
 # Released under the modified BSD license. See COPYING.md for more details.

 import numpy as np
+from scipy.stats import randint, uniform

 from miplearn.problems.setpack import (
     SetPackData,
+    SetPackGenerator,
     build_setpack_model_gurobipy,
 )

@@ -24,3 +26,19 @@ def test_setpack() -> None:
     model = build_setpack_model_gurobipy(data)
     model.optimize()
     assert model.inner.objval == -22.0
+
+
+def test_set_pack_generator_callable() -> None:
+    np.random.seed(42)
+    gen = SetPackGenerator(
+        n_elements=randint(low=4, high=5),
+        n_sets=lambda n: n * 2,
+        costs=uniform(loc=10.0, scale=0.0),
+        density=uniform(loc=0.5, scale=0),
+        K=uniform(loc=0, scale=0),
+    )
+    data = gen.generate(1)
+    n_elements, n_sets = data[0].incidence_matrix.shape
+    assert n_elements == 4
+    assert n_sets == 8
+    assert len(data[0].costs) == 8

@@ -1,7 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from tempfile import NamedTemporaryFile
+from tempfile import TemporaryDirectory

 import networkx as nx
 import numpy as np
@@ -25,8 +25,8 @@ def test_stab() -> None:
         build_stab_model_pyomo(data),
     ]:
         assert isinstance(model, AbstractModel)
-        with NamedTemporaryFile() as tempfile:
-            with H5File(tempfile.name) as h5:
+        with TemporaryDirectory() as tempdir:
+            with H5File(f"{tempdir}/data.h5", "w") as h5:
                 model.optimize()
                 model.extract_after_mip(h5)
                 assert h5.get_scalar("mip_obj_value") == -2.0

@@ -17,56 +17,30 @@ def test_tsp_generator() -> None:
     gen = TravelingSalesmanGenerator(
         x=uniform(loc=0.0, scale=1000.0),
         y=uniform(loc=0.0, scale=1000.0),
-        n=randint(low=3, high=4),
+        n=randint(low=5, high=6),
         gamma=uniform(loc=1.0, scale=0.25),
-        fix_cities=True,
         round=True,
     )
-    data = gen.generate(2)
+    data = gen.generate(1)
     assert data[0].distances.tolist() == [
-        [0.0, 591.0, 996.0],
-        [591.0, 0.0, 765.0],
-        [996.0, 765.0, 0.0],
-    ]
-    assert data[1].distances.tolist() == [
-        [0.0, 556.0, 853.0],
-        [556.0, 0.0, 779.0],
-        [853.0, 779.0, 0.0],
+        [0.0, 525.0, 950.0, 392.0, 382.0],
+        [525.0, 0.0, 752.0, 761.0, 178.0],
+        [950.0, 752.0, 0.0, 809.0, 721.0],
+        [392.0, 761.0, 809.0, 0.0, 700.0],
+        [382.0, 178.0, 721.0, 700.0, 0.0],
     ]

-
-def test_tsp() -> None:
-    data = TravelingSalesmanData(
-        n_cities=6,
-        distances=squareform(
-            pdist(
-                [
-                    [0.0, 0.0],
-                    [1.0, 0.0],
-                    [2.0, 0.0],
-                    [3.0, 0.0],
-                    [0.0, 1.0],
-                    [3.0, 1.0],
-                ]
-            )
-        ),
-    )
-    model = build_tsp_model_gurobipy(data)
+    model = build_tsp_model_gurobipy(data[0])
     model.optimize()
     assert model.inner.getAttr("x", model.inner.getVars()) == [
-        1.0,
-        0.0,
-        0.0,
-        1.0,
-        0.0,
-        1.0,
-        0.0,
-        0.0,
-        0.0,
-        1.0,
-        0.0,
-        0.0,
-        0.0,
-        1.0,
-        1.0,
+        0.0,
+        0.0,
+        1.0,
+        1.0,
+        1.0,
+        0.0,
+        1.0,
+        1.0,
+        0.0,
+        0.0,
     ]

@@ -21,32 +21,21 @@ def test_generator() -> None:
|
|||||||
min_power=uniform(loc=0.25, scale=0.5),
|
min_power=uniform(loc=0.25, scale=0.5),
|
||||||
cost_startup=uniform(loc=1, scale=1),
|
cost_startup=uniform(loc=1, scale=1),
|
||||||
cost_prod=uniform(loc=1, scale=1),
|
cost_prod=uniform(loc=1, scale=1),
|
||||||
|
cost_prod_quad=uniform(loc=1, scale=1),
|
||||||
cost_fixed=uniform(loc=1, scale=1),
|
cost_fixed=uniform(loc=1, scale=1),
|
||||||
min_uptime=randint(low=1, high=8),
|
min_uptime=randint(low=1, high=8),
|
||||||
min_downtime=randint(low=1, high=8),
|
min_downtime=randint(low=1, high=8),
|
||||||
cost_jitter=uniform(loc=0.75, scale=0.5),
|
|
||||||
demand_jitter=uniform(loc=0.9, scale=0.2),
|
|
||||||
fix_units=True,
|
|
||||||
)
|
)
|
||||||
data = gen.generate(2)
|
data = gen.generate(1)
|
||||||
|
assert data[0].demand.tolist() == [430.3, 511.29, 484.91, 860.61]
|
||||||
assert data[0].demand.tolist() == [430.3, 518.65, 448.16, 860.61]
|
|
||||||
assert data[0].min_power.tolist() == [120.05, 156.73, 124.44]
|
assert data[0].min_power.tolist() == [120.05, 156.73, 124.44]
|
||||||
assert data[0].max_power.tolist() == [218.54, 477.82, 379.4]
|
assert data[0].max_power.tolist() == [218.54, 477.82, 379.4]
|
||||||
assert data[0].min_uptime.tolist() == [3, 3, 5]
|
assert data[0].min_uptime.tolist() == [3, 3, 5]
|
||||||
assert data[0].min_downtime.tolist() == [4, 3, 6]
|
assert data[0].min_downtime.tolist() == [4, 3, 6]
|
||||||
assert data[0].cost_startup.tolist() == [1.06, 1.72, 1.94]
|
assert data[0].cost_startup.tolist() == [1.06, 1.72, 1.94]
|
||||||
assert data[0].cost_prod.tolist() == [1.0, 1.99, 1.62]
|
assert data[0].cost_prod.tolist() == [1.0, 1.99, 1.62]
|
||||||
assert data[0].cost_fixed.tolist() == [1.61, 1.01, 1.02]
|
assert data[0].cost_prod_quad.tolist() == [1.6117, 1.0071, 1.0231]
|
||||||
|
assert data[0].cost_fixed.tolist() == [1.52, 1.4, 1.05]
|
||||||
assert data[1].demand.tolist() == [407.3, 476.18, 458.77, 840.38]
|
|
||||||
assert data[1].min_power.tolist() == [120.05, 156.73, 124.44]
|
|
||||||
assert data[1].max_power.tolist() == [218.54, 477.82, 379.4]
|
|
||||||
assert data[1].min_uptime.tolist() == [3, 3, 5]
|
|
||||||
assert data[1].min_downtime.tolist() == [4, 3, 6]
|
|
||||||
assert data[1].cost_startup.tolist() == [1.32, 1.69, 2.29]
|
|
||||||
assert data[1].cost_prod.tolist() == [1.09, 1.94, 1.23]
|
|
||||||
assert data[1].cost_fixed.tolist() == [1.97, 1.04, 0.96]
|
|
||||||
|
|
||||||
|
|
||||||
def test_uc() -> None:
|
def test_uc() -> None:
|
||||||
@@ -59,6 +48,7 @@ def test_uc() -> None:
|
|||||||
cost_startup=np.array([100, 120, 200]),
|
cost_startup=np.array([100, 120, 200]),
|
||||||
cost_prod=np.array([1.0, 1.25, 1.5]),
|
cost_prod=np.array([1.0, 1.25, 1.5]),
|
||||||
cost_fixed=np.array([10, 12, 9]),
|
cost_fixed=np.array([10, 12, 9]),
|
||||||
|
cost_prod_quad=np.array([0, 0, 0]),
|
||||||
)
|
)
|
||||||
model = build_uc_model_gurobipy(data)
|
model = build_uc_model_gurobipy(data)
|
||||||
model.optimize()
|
model.optimize()
|
||||||
|
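Both hunks introduce a quadratic production-cost coefficient, `cost_prod_quad`, alongside the existing linear one. A sketch of how the `test_uc` fixture would be assembled after this change; `UnitCommitmentData` and its import path are assumptions (only the field names and `build_uc_model_gurobipy` appear in the diff), and the demand and power numbers are illustrative:

```python
import numpy as np

from miplearn.problems.uc import (  # assumed import path
    UnitCommitmentData,
    build_uc_model_gurobipy,
)

data = UnitCommitmentData(
    demand=np.array([100.0, 150.0, 200.0, 120.0]),
    min_power=np.array([30.0, 40.0, 50.0]),
    max_power=np.array([80.0, 100.0, 120.0]),
    min_uptime=np.array([2, 2, 3]),
    min_downtime=np.array([2, 2, 3]),
    cost_startup=np.array([100, 120, 200]),
    cost_prod=np.array([1.0, 1.25, 1.5]),
    cost_fixed=np.array([10, 12, 9]),
    # New in this revision: quadratic production-cost coefficients.
    # Zeros reproduce the previous, purely linear objective.
    cost_prod_quad=np.array([0, 0, 0]),
)

model = build_uc_model_gurobipy(data)
model.optimize()
```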
@@ -1,7 +1,7 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
-from tempfile import NamedTemporaryFile
+from tempfile import TemporaryDirectory
 from typing import Any
 
 import numpy as np
@@ -11,31 +11,31 @@ from miplearn.h5 import H5File
 
 
 def test_h5() -> None:
-    file = NamedTemporaryFile()
-    h5 = H5File(file.name)
-    _assert_roundtrip_scalar(h5, "A")
-    _assert_roundtrip_scalar(h5, True)
-    _assert_roundtrip_scalar(h5, 1)
-    _assert_roundtrip_scalar(h5, 1.0)
-    assert h5.get_scalar("unknown-key") is None
+    with TemporaryDirectory() as tempdir:
+        with H5File(f"{tempdir}/data.h5", "w") as h5:
+            _assert_roundtrip_scalar(h5, "A")
+            _assert_roundtrip_scalar(h5, True)
+            _assert_roundtrip_scalar(h5, 1)
+            _assert_roundtrip_scalar(h5, 1.0)
+            assert h5.get_scalar("unknown-key") is None
 
-    _assert_roundtrip_array(h5, np.array([True, False]))
-    _assert_roundtrip_array(h5, np.array([1, 2, 3]))
-    _assert_roundtrip_array(h5, np.array([1.0, 2.0, 3.0]))
-    _assert_roundtrip_array(h5, np.array(["A", "BB", "CCC"], dtype="S"))
-    assert h5.get_array("unknown-key") is None
+            _assert_roundtrip_array(h5, np.array([True, False]))
+            _assert_roundtrip_array(h5, np.array([1, 2, 3]))
+            _assert_roundtrip_array(h5, np.array([1.0, 2.0, 3.0]))
+            _assert_roundtrip_array(h5, np.array(["A", "BB", "CCC"], dtype="S"))
+            assert h5.get_array("unknown-key") is None
 
-    _assert_roundtrip_sparse(
-        h5,
-        coo_matrix(
-            [
-                [1.0, 0.0, 0.0],
-                [0.0, 2.0, 3.0],
-                [0.0, 0.0, 4.0],
-            ],
-        ),
-    )
-    assert h5.get_sparse("unknown-key") is None
+            _assert_roundtrip_sparse(
+                h5,
+                coo_matrix(
+                    [
+                        [1.0, 0.0, 0.0],
+                        [0.0, 2.0, 3.0],
+                        [0.0, 0.0, 4.0],
+                    ],
+                ),
+            )
+            assert h5.get_sparse("unknown-key") is None
 
 
 def _assert_roundtrip_array(h5: H5File, original: np.ndarray) -> None:
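The switch from `NamedTemporaryFile` to `TemporaryDirectory` likely exists because h5py opens the file by path, which can fail while the `NamedTemporaryFile` handle is still open (notably on Windows); creating the file inside a temporary directory sidesteps that, and the explicit `"w"` mode makes the create-for-writing intent clear. A sketch of the resulting pattern; `put_scalar` and `put_array` are assumed writer counterparts of the `get_*` methods shown in the diff:

```python
from tempfile import TemporaryDirectory

import numpy as np

from miplearn.h5 import H5File

with TemporaryDirectory() as tempdir:
    # The file lives inside the temporary directory, so it can be opened
    # by path without keeping a second handle to it.
    with H5File(f"{tempdir}/data.h5", "w") as h5:
        h5.put_scalar("mip_obj_value", 11.0)          # assumed API
        h5.put_array("x", np.array([1.0, 0.0, 1.0]))  # assumed API
        assert h5.get_scalar("mip_obj_value") == 11.0
        # Keys that were never written simply come back as None.
        assert h5.get_scalar("unknown-key") is None
        assert h5.get_array("unknown-key") is None
```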
@@ -2,7 +2,7 @@
 # Copyright (C) 2020-2022, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
-from tempfile import NamedTemporaryFile
+from tempfile import TemporaryDirectory
 from typing import Callable, Any
 
 import numpy as np
@@ -49,8 +49,8 @@ def _test_solver(build_model: Callable, data: Any) -> None:
 
 
 def _test_extract(model: AbstractModel) -> None:
-    with NamedTemporaryFile() as tempfile:
-        with H5File(tempfile.name) as h5:
+    with TemporaryDirectory() as tempdir:
+        with H5File(f"{tempdir}/data.h5", "w") as h5:
 
             def test_scalar(key: str, expected_value: Any) -> None:
                 actual_value = h5.get_scalar(key)
@@ -129,7 +129,6 @@ def _test_extract(model: AbstractModel) -> None:
             test_scalar("mip_obj_value", 11.0)
             mip_wallclock_time = h5.get_scalar("mip_wallclock_time")
            assert mip_wallclock_time is not None
-            assert mip_wallclock_time > 0
             if model._supports_node_count:
                 count = h5.get_scalar("mip_node_count")
                 assert count is not None
@@ -145,8 +144,8 @@ def _test_extract(model: AbstractModel) -> None:
 
 
 def _test_add_constr(model: AbstractModel) -> None:
-    with NamedTemporaryFile() as tempfile:
-        with H5File(tempfile.name) as h5:
+    with TemporaryDirectory() as tempdir:
+        with H5File(f"{tempdir}/data.h5", "w") as h5:
             model.add_constrs(
                 np.array([b"x[2]", b"x[3]"], dtype="S"),
                 np.array([[0, 1], [1, 0]]),
@@ -161,8 +160,8 @@ def _test_add_constr(model: AbstractModel) -> None:
 
 
 def _test_fix_vars(model: AbstractModel) -> None:
-    with NamedTemporaryFile() as tempfile:
-        with H5File(tempfile.name) as h5:
+    with TemporaryDirectory() as tempdir:
+        with H5File(f"{tempdir}/data.h5", "w") as h5:
             model.fix_variables(
                 var_names=np.array([b"x[2]", b"x[3]"], dtype="S"),
                 var_values=np.array([0, 0]),
@@ -175,8 +174,8 @@ def _test_fix_vars(model: AbstractModel) -> None:
 
 
 def _test_infeasible(model: AbstractModel) -> None:
-    with NamedTemporaryFile() as tempfile:
-        with H5File(tempfile.name) as h5:
+    with TemporaryDirectory() as tempdir:
+        with H5File(f"{tempdir}/data.h5", "w") as h5:
             model.fix_variables(
                 var_names=np.array([b"x[0]", b"x[3]"], dtype="S"),
                 var_values=np.array([0, 0]),
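The `_test_fix_vars` and `_test_infeasible` fixtures share one call sequence: fix a few binary variables to trial values, then re-solve. A minimal helper capturing that sequence; `model` stands for any object exposing the `fix_variables`/`optimize` interface exercised above, and the helper itself is an illustration, not part of the library:

```python
from typing import Any, List

import numpy as np


def optimize_with_fixings(model: Any, names: List[bytes], values: List[float]) -> None:
    # Variable names travel as fixed-width bytes (dtype="S"), matching
    # the arrays used in the tests above.
    model.fix_variables(
        var_names=np.array(names, dtype="S"),
        var_values=np.array(values),
    )
    model.optimize()


# e.g. optimize_with_fixings(model, [b"x[2]", b"x[3]"], [0.0, 0.0]);
# fixing x[0] and x[3] to zero instead renders the test instance
# infeasible, which is exactly what _test_infeasible checks.
```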