diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e7d5bdd --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.nfs* diff --git a/0.3/.buildinfo b/0.3/.buildinfo index 5242ef2..a5784b5 100644 --- a/0.3/.buildinfo +++ b/0.3/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 2d7b5c0d06fd9875399c14b694147ac6 +config: f1eab20ae8c8d57432d1dbf183c11c00 tags: d77d1c0d9ca2f4c8421862c7c5a0d620 diff --git a/0.3/_sources/api/collectors.rst.txt b/0.3/_sources/api/collectors.rst.txt index 40c8394..ca2072f 100644 --- a/0.3/_sources/api/collectors.rst.txt +++ b/0.3/_sources/api/collectors.rst.txt @@ -25,18 +25,18 @@ miplearn.collectors.basic :undoc-members: :show-inheritance: -miplearn.features.fields ------------------------- +miplearn.extractors.fields +-------------------------- -.. automodule:: miplearn.features.fields +.. automodule:: miplearn.extractors.fields :members: :undoc-members: :show-inheritance: -miplearn.features.AlvLouWeh2017 -------------------------------- +miplearn.extractors.AlvLouWeh2017 +--------------------------------- -.. automodule:: miplearn.features.AlvLouWeh2017 +.. automodule:: miplearn.extractors.AlvLouWeh2017 :members: :undoc-members: :show-inheritance: diff --git a/0.3/_sources/guide/collectors.ipynb.txt b/0.3/_sources/guide/collectors.ipynb.txt index 0d90c7b..7cc7b37 100644 --- a/0.3/_sources/guide/collectors.ipynb.txt +++ b/0.3/_sources/guide/collectors.ipynb.txt @@ -38,7 +38,10 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -95,20 +98,20 @@ " print(\"x3 =\", h5.get_array(\"x3\"))\n", " print(\"x4 =\", h5.get_array(\"x4\"))\n", " print(\"x5 =\", h5.get_sparse(\"x5\"))" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [], "metadata": { "collapsed": false - } + }, + "source": [] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "## Basic collector\n", "\n", @@ -121,13 +124,13 @@ "Data extracted in Phases 1, 2 and 3 above are prefixed, respectively as `static_`, `lp_` and `mip_`. The entire set of fields is shown in the table below.\n", "\n", "[BasicCollector]: ../../api/collectors/#miplearn.collectors.basic.BasicCollector\n" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Data fields\n", "\n", @@ -161,39 +164,30 @@ "| `mip_obj_value` | `float` | Value of the best MIP solution |\n", "| `mip_var_values` | `(nvars,)` | Best MIP solution |\n", "| `mip_wallclock_time` | `float` | Time taken to solve the MIP (in seconds) |" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Example\n", "\n", "The example below shows how to generate a few random instances of the traveling salesman problem, store its problem data, run the collector and print some of the training data to screen." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, + "metadata": { + "collapsed": false + }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/axavier/Software/anaconda3/envs/miplearn/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. 
Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "Removing empty/corrupted h5 file: data/tsp/00000.h5\n", "lp_obj_value = 2909.0\n", "mip_obj_value = 2921.0\n" ] @@ -209,7 +203,7 @@ " TravelingSalesmanGenerator,\n", " build_tsp_model,\n", ")\n", - "from miplearn.io import save\n", + "from miplearn.io import write_pkl_gz\n", "from miplearn.h5 import H5File\n", "from miplearn.collectors.basic import BasicCollector\n", "\n", @@ -228,30 +222,27 @@ ").generate(10)\n", "\n", "# Save instance data to data/tsp/00000.pkl.gz, data/tsp/00001.pkl.gz, ...\n", - "save(data, \"data/tsp\")\n", + "write_pkl_gz(data, \"data/tsp\")\n", "\n", - "# Solve all instances and collect basic solution information. Process at most four\n", - "# instances in parallel, with a per-instance time limit of one hour.\n", - "bc = BasicCollector(time_limit_sec=3600)\n", + "# Solve all instances and collect basic solution information.\n", + "# Process at most four instances in parallel.\n", + "bc = BasicCollector()\n", "bc.collect(glob(\"data/tsp/*.pkl.gz\"), build_tsp_model, n_jobs=4)\n", "\n", "# Read and print some training data for the first instance.\n", "with H5File(\"data/tsp/00000.h5\", \"r\") as h5:\n", " print(\"lp_obj_value = \", h5.get_scalar(\"lp_obj_value\"))\n", " print(\"mip_obj_value = \", h5.get_scalar(\"mip_obj_value\"))" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 2, - "outputs": [], - "source": [], + "execution_count": null, "metadata": { "collapsed": false - } + }, + "outputs": [], + "source": [] } ], "metadata": { @@ -270,7 +261,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.12" } }, "nbformat": 4, diff --git a/0.3/_sources/guide/features.ipynb.txt b/0.3/_sources/guide/features.ipynb.txt index 70e937c..e191ad6 100644 --- a/0.3/_sources/guide/features.ipynb.txt +++ b/0.3/_sources/guide/features.ipynb.txt @@ -12,6 +12,9 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "## Overview\n", @@ -24,37 +27,37 @@ "\n", "[FeatureExtractor]: ../../api/collectors/#miplearn.features.fields.FeaturesExtractor\n", "[H5File]: ../../api/helpers/#miplearn.h5.H5File" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "## H5FieldsExtractor\n", "\n", "[H5FieldsExtractor][H5FieldsExtractor], the most simple extractor in MIPLearn, simple extracts data that is already available in the HDF5 file, assembles it into a matrix and returns it as-is. The fields used to build instance, variable and constraint features are user-specified. The class also performs checks to ensure that the shapes of the returned matrices make sense." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Example\n", "\n", "The example below demonstrates the usage of H5FieldsExtractor in a randomly generated instance of the multi-dimensional knapsack problem." 
- ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 5, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -92,12 +95,12 @@ "from scipy.stats import uniform, randint\n", "\n", "from miplearn.collectors.basic import BasicCollector\n", - "from miplearn.features.fields import H5FieldsExtractor\n", + "from miplearn.extractors.fields import H5FieldsExtractor\n", "from miplearn.h5 import H5File\n", - "from miplearn.io import save\n", + "from miplearn.io import write_pkl_gz\n", "from miplearn.problems.multiknapsack import (\n", " MultiKnapsackGenerator,\n", - " build_multiknapsack_model\n", + " build_multiknapsack_model,\n", ")\n", "\n", "# Set random seed to make example reproducible\n", @@ -105,7 +108,7 @@ "\n", "# Generate some random multiknapsack instances\n", "rmtree(\"data/multiknapsack/\", ignore_errors=True)\n", - "save(\n", + "write_pkl_gz(\n", " MultiKnapsackGenerator(\n", " n=randint(low=10, high=11),\n", " m=randint(low=5, high=6),\n", @@ -117,7 +120,7 @@ " p_jitter=uniform(loc=0.75, scale=0.5),\n", " fix_w=True,\n", " ).generate(10),\n", - " \"data/multiknapsack\"\n", + " \"data/multiknapsack\",\n", ")\n", "\n", "# Run the basic collector\n", @@ -148,7 +151,7 @@ " \"static_constr_rhs\",\n", " \"lp_constr_dual_values\",\n", " \"lp_constr_slacks\",\n", - " ]\n", + " ],\n", ")\n", "\n", "with H5File(\"data/multiknapsack/00000.h5\") as h5:\n", @@ -163,50 +166,50 @@ " # Extract and print constraint features\n", " x3 = ext.get_constr_features(h5)\n", " print(\"constraint features\", x3.shape, \"\\n\", x3)\n" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "[H5FieldsExtractor]: ../../api/collectors/#miplearn.features.fields.H5FieldsExtractor" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "
\n", "Warning\n", "\n", "You should ensure that the number of features remains the same for all relevant HDF5 files. In the previous example, to illustrate this issue, we used variable objective coefficients as instance features. While this is allowed, note that this requires all problem instances to have the same number of variables; otherwise the number of features would vary from instance to instance and MIPLearn would be unable to concatenate the matrices.\n", "
" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "## AlvLouWeh2017Extractor\n", "\n", "Alvarez, Louveaux and Wehenkel (2017) proposed a set features to describe a particular decision variable in a given node of the branch-and-bound tree, and applied it to the problem of mimicking strong branching decisions. The class [AlvLouWeh2017Extractor][] implements a subset of these features (40 out of 64), which are available outside of the branch-and-bound tree. Some features are derived from the static defintion of the problem (i.e. from objective function and constraint data), while some features are derived from the solution to the LP relaxation. The features have been designed to be: (i) independent of the size of the problem; (ii) invariant with respect to irrelevant problem transformations, such as row and column permutation; and (iii) independent of the scale of the problem. We refer to the paper for a more complete description.\n", "\n", "### Example" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -277,7 +280,7 @@ } ], "source": [ - "from miplearn.features.AlvLouWeh2017 import AlvLouWeh2017Extractor\n", + "from miplearn.extractors.AlvLouWeh2017 import AlvLouWeh2017Extractor\n", "from miplearn.h5 import H5File\n", "\n", "# Build the extractor\n", @@ -285,17 +288,16 @@ "\n", "# Open previously-created multiknapsack training data\n", "with H5File(\"data/multiknapsack/00000.h5\") as h5:\n", - "\n", " # Extract and print variable features\n", " x1 = ext.get_var_features(h5)\n", " print(\"x1\", x1.shape, \"\\n\", x1.round(1))" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "
\n", "References\n", @@ -304,10 +306,7 @@ "* **Alvarez, Alejandro Marcos, Quentin Louveaux, and Louis Wehenkel.** *A machine learning-based approximation of strong branching.* INFORMS Journal on Computing 29.1 (2017): 185-195.\n", "\n", "
" - ], - "metadata": { - "collapsed": false - } + ] } ], "metadata": { @@ -326,7 +325,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.12" } }, "nbformat": 4, diff --git a/0.3/_sources/guide/primal.ipynb.txt b/0.3/_sources/guide/primal.ipynb.txt index e9d8f45..040cffe 100644 --- a/0.3/_sources/guide/primal.ipynb.txt +++ b/0.3/_sources/guide/primal.ipynb.txt @@ -60,10 +60,16 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, + "id": "253adbf4", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "outputs": [], "source": [ - "\n", "from sklearn.dummy import DummyClassifier\n", "from sklearn.neighbors import KNeighborsClassifier\n", "\n", @@ -114,14 +120,13 @@ " extractor=H5FieldsExtractor(instance_fields=[\"static_var_obj_coeffs\"]),\n", " constructor=MergeTopSolutions(k=3, thresholds=[0.25, 0.75]),\n", " action=EnforceProximity(3),\n", - ")" - ], - "metadata": { - "collapsed": false - } + ")\n" + ] }, { "cell_type": "markdown", + "id": "f194a793", + "metadata": {}, "source": [ "## Independent vars primal component\n", "\n", @@ -136,14 +141,18 @@ "3. To make multiple copies of the provided ML classifier, MIPLearn uses the standard `sklearn.base.clone` method, which may not be suitable for classifiers from other frameworks. To handle this, it is possible to override the clone function using the `clone_fn` constructor argument.\n", "\n", "### Examples" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, + "id": "3fc0b5d1", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "outputs": [], "source": [ "from sklearn.linear_model import LogisticRegression\n", @@ -151,7 +160,6 @@ "from miplearn.classifiers.singleclass import SingleClassFix\n", "from miplearn.components.primal.indep import IndependentVarsPrimalComponent\n", "from miplearn.extractors.AlvLouWeh2017 import AlvLouWeh2017Extractor\n", - "from miplearn.solvers.learning import LearningSolver\n", "from miplearn.components.primal.actions import SetWarmStart\n", "\n", "# Configures a primal component that independently predicts the value of each\n", @@ -167,14 +175,13 @@ " ),\n", " extractor=AlvLouWeh2017Extractor(),\n", " action=SetWarmStart(),\n", - ")" - ], - "metadata": { - "collapsed": false - } + ")\n" + ] }, { "cell_type": "markdown", + "id": "45107a0c", + "metadata": {}, "source": [ "## Joint vars primal component\n", "In the previous subsection, we used multiple machine learning models to independently predict the values of the binary decision variables. When these values are correlated, an alternative approach is to jointly predict the values of all binary variables using a single machine learning model. This strategy is implemented by `JointVarsPrimalComponent`. Compared to the previous ones, this component is much more straightforwad. It simply extracts instance features, using the user-provided feature extractor, then directly trains the user-provided binary classifier (using the `fit` method), without making any copies. The trained classifier is then used to predict entire solutions (using the `predict` method), which are given to the solver using one of the previously discussed methods. 
In the example below, we illustrate the usage of this component with a simple feed-forward neural network.\n", @@ -182,21 +189,24 @@ "`JointVarsPrimalComponent` can also be used to implement strategies that use multiple machine learning models, but not indepedently. For example, a common strategy in multioutput prediction is building a *classifier chain*. In this approach, the first decision variable is predicted using the instance features alone; but the $n$-th decision variable is predicted using the instance features plus the predicted values of the $n-1$ previous variables. This can be easily implemented using scikit-learn's `ClassifierChain` estimator, as shown in the example below.\n", "\n", "### Examples" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, + "id": "cf9b52dd", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "outputs": [], "source": [ "from sklearn.multioutput import ClassifierChain\n", "from sklearn.neural_network import MLPClassifier\n", "from miplearn.components.primal.joint import JointVarsPrimalComponent\n", "from miplearn.extractors.fields import H5FieldsExtractor\n", - "from miplearn.solvers.learning import LearningSolver\n", "from miplearn.components.primal.actions import SetWarmStart\n", "\n", "# Configures a primal component that uses a feedforward neural network\n", @@ -206,7 +216,7 @@ "comp = JointVarsPrimalComponent(\n", " clf=MLPClassifier(),\n", " extractor=H5FieldsExtractor(\n", - " instance_fields=['static_var_obj_coeffs'],\n", + " instance_fields=[\"static_var_obj_coeffs\"],\n", " ),\n", " action=SetWarmStart(),\n", ")\n", @@ -217,31 +227,34 @@ "comp = JointVarsPrimalComponent(\n", " clf=ClassifierChain(SingleClassFix(LogisticRegression())),\n", " extractor=H5FieldsExtractor(\n", - " instance_fields=['static_var_obj_coeffs'],\n", + " instance_fields=[\"static_var_obj_coeffs\"],\n", " ),\n", " action=SetWarmStart(),\n", - ")" - ], - "metadata": { - "collapsed": false - } + ")\n" + ] }, { "cell_type": "markdown", + "id": "dddf7be4", + "metadata": {}, "source": [ "## Expert primal component\n", "\n", "Before spending time and effort choosing a machine learning strategy and tweaking its parameters, it is usually a good idea to evaluate what would be the performance impact of the model if its predictions were 100% accurate. This is especially important for the prediction of warm starts, since they are not always very beneficial. To simplify this task, MIPLearn provides `ExpertPrimalComponent`, a component which simply loads the optimal solution from the HDF5 file, assuming that it has already been computed, then directly provides it to the solver using one of the available methods. 
This component is useful in benchmarks, to evaluate how close to the best theoretical performance the machine learning components are.\n", "\n", "### Example" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 4, + "id": "9e2e81b9", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "outputs": [], "source": [ "from miplearn.components.primal.expert import ExpertPrimalComponent\n", @@ -250,18 +263,13 @@ "# Configures an expert primal component, which reads a pre-computed\n", "# optimal solution from the HDF5 file and provides it to the solver\n", "# as warm start.\n", - "comp = ExpertPrimalComponent(\n", - " action=SetWarmStart()\n", - ")" - ], - "metadata": { - "collapsed": false - } + "comp = ExpertPrimalComponent(action=SetWarmStart())\n" + ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -275,7 +283,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.12" } }, "nbformat": 4, diff --git a/0.3/_sources/guide/problems.ipynb.txt b/0.3/_sources/guide/problems.ipynb.txt index e91b0d3..b24a519 100644 --- a/0.3/_sources/guide/problems.ipynb.txt +++ b/0.3/_sources/guide/problems.ipynb.txt @@ -18,6 +18,9 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "
\n", "Warning\n", @@ -31,10 +34,7 @@ "- To make the instances easier to process, all formulations are written as a minimization problem.\n", "- Some problem formulations, such as the one for the *traveling salesman problem*, contain an exponential number of constraints, which are enforced through constraint generation. The MPS files for these problems contain only the constraints that were generated during a trial run, not the entire set of constraints. Resolving the MPS file, therefore, may not generate a feasible primal solution for the problem.\n", "
" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", @@ -63,6 +63,9 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "$$\n", @@ -76,13 +79,13 @@ " & x_{ij} \\in \\{0,1\\} & \\forall i,j=1,\\ldots,n \\\\\n", "\\end{align*}\n", "$$" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Random instance generator\n", "\n", @@ -93,19 +96,16 @@ "If `fix_items=True`, the class creates a reference instance, using the method previously described, then generates additional instances by perturbing its item sizes and bin capacity. More specifically, the sizes of the items are set to $s_i \\gamma_i$, where $s_i$ is the size of the $i$-th item in the reference instance and $\\gamma_i$ is sampled from `sizes_jitter`. Similarly, the bin size is set to $B \\beta$, where $B$ is the reference bin size and $\\beta$ is sampled from `capacity_jitter`. The number of items remains the same across all generated instances.\n", "\n", "[BinPackGenerator]: ../../api/problems/#miplearn.problems.binpack.BinPackGenerator" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [ - "### Example" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "### Example" + ] }, { "cell_type": "code", @@ -130,9 +130,11 @@ "8 [ 8.47 21.9 16.58 15.37 3.76 3.91 1.57 20.57 14.76 18.61] 94.58\n", "9 [ 8.57 22.77 17.06 16.25 4.14 4. 1.56 22.97 14.09 19.09] 100.79\n", "\n", - "Restricted license - for non-production use only - expires 2023-10-25\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 20 rows, 110 columns and 210 nonzeros\n", "Model fingerprint: 0x1ff9913f\n", "Variable types: 0 continuous, 110 integer (110 binary)\n", @@ -156,7 +158,7 @@ "H 0 0 2.0000000 1.27484 36.3% - 0s\n", " 0 0 1.27484 0 4 2.00000 1.27484 36.3% - 0s\n", "\n", - "Explored 1 nodes (38 simplex iterations) in 0.00 seconds (0.00 work units)\n", + "Explored 1 nodes (38 simplex iterations) in 0.01 seconds (0.00 work units)\n", "Thread count was 32 (of 32 available processors)\n", "\n", "Solution count 3: 2 4 5 \n", @@ -164,6 +166,14 @@ "Optimal solution found (tolerance 1.00e-04)\n", "Best objective 2.000000000000e+00, best bound 2.000000000000e+00, gap 0.0000%\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/axavier/.conda/envs/miplearn2/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] } ], "source": [ @@ -191,7 +201,7 @@ "\n", "# Optimize first instance\n", "model = build_binpack_model(data[0])\n", - "model.optimize()" + "model.optimize()\n" ] }, { @@ -220,6 +230,9 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "$$\n", @@ -234,10 +247,7 @@ " & \\forall j=1,\\ldots,n\n", "\\end{align*}\n", "$$" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", @@ -289,12 +299,12 @@ }, { "cell_type": "markdown", - "source": [ - "### Example" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "### Example" + ] }, { "cell_type": "code", @@ -317,8 +327,11 @@ "capacities\n", " [1310. 988. 1004. 1269. 1007.]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 5 rows, 10 columns and 50 nonzeros\n", "Model fingerprint: 0xaf3ac15e\n", "Variable types: 0 continuous, 10 integer (10 binary)\n", @@ -344,7 +357,7 @@ "Cutting planes:\n", " Cover: 1\n", "\n", - "Explored 1 nodes (4 simplex iterations) in 0.00 seconds (0.00 work units)\n", + "Explored 1 nodes (4 simplex iterations) in 0.01 seconds (0.00 work units)\n", "Thread count was 32 (of 32 available processors)\n", "\n", "Solution count 2: -1279 -804 \n", @@ -388,7 +401,7 @@ "\n", "# Build model and optimize\n", "model = build_multiknapsack_model(data[0])\n", - "model.optimize()" + "model.optimize()\n" ] }, { @@ -453,12 +466,12 @@ }, { "cell_type": "markdown", - "source": [ - "### Example" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "### Example" + ] }, { "cell_type": "code", @@ -485,8 +498,11 @@ "demands = [6.12 1.39 2.92 3.66 4.56 7.85 2. 5.14 5.92 0.46]\n", "capacities = [151.89 42.63 16.26 237.22 241.41 202.1 76.15 24.42 171.06 110.04]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 21 rows, 110 columns and 220 nonzeros\n", "Model fingerprint: 0x8d8d9346\n", "Variable types: 0 continuous, 110 integer (110 binary)\n", @@ -519,7 +535,7 @@ " 0 0 86.06884 0 15 93.92000 86.06884 8.36% - 0s\n", "* 0 0 0 91.2300000 91.23000 0.00% - 0s\n", "\n", - "Explored 1 nodes (60 simplex iterations) in 0.01 seconds (0.00 work units)\n", + "Explored 1 nodes (70 simplex iterations) in 0.02 seconds (0.00 work units)\n", "Thread count was 32 (of 32 available processors)\n", "\n", "Solution count 10: 91.23 93.92 93.98 ... 368.79\n", @@ -561,7 +577,7 @@ "\n", "# Build and optimize model\n", "model = build_pmedian_model(data[0])\n", - "model.optimize()" + "model.optimize()\n" ] }, { @@ -576,14 +592,14 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Formulation\n", "\n", "Let $U = \\{1,\\ldots,n\\}$ be a given universe set, and let $S=\\{S_1,\\ldots,S_m\\}$ be a collection of sets whose union equal $U$. 
For each $j \\in \\{1,\\ldots,m\\}$, let $w_j$ be the weight of set $S_j$, and let $x_j$ be a binary decision variable that equals one if set $S_j$ is chosen. The set cover problem is formulated as:" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", @@ -650,8 +666,11 @@ "costs [1044.58 850.13 1014.5 944.83 697.9 971.87 213.49 220.98 70.23\n", " 425.33]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 5 rows, 10 columns and 28 nonzeros\n", "Model fingerprint: 0xe5c2d4fa\n", "Variable types: 0 continuous, 10 integer (10 binary)\n", @@ -678,7 +697,7 @@ "source": [ "import numpy as np\n", "from scipy.stats import uniform, randint\n", - "from miplearn.problems.setcover import SetCoverGenerator, build_setcover_model\n", + "from miplearn.problems.setcover import SetCoverGenerator, build_setcover_model_gurobipy\n", "\n", "# Set random seed, to make example reproducible\n", "np.random.seed(42)\n", @@ -702,8 +721,8 @@ "print()\n", "\n", "# Build and optimize model\n", - "model = build_setcover_model(data[0])\n", - "model.optimize()" + "model = build_setcover_model_gurobipy(data[0])\n", + "model.optimize()\n" ] }, { @@ -718,17 +737,20 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Formulation\n", "\n", "Let $U=\\{1,\\ldots,n\\}$ be a given universe set, and let $S = \\{S_1, \\ldots, S_m\\}$ be a collection of subsets of $U$. For each subset $j \\in \\{1, \\ldots, m\\}$, let $w_j$ be the weight of $S_j$ and let $x_j$ be a binary decision variable which equals one if set $S_j$ is chosen. 
The problem is formulated as:" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "$$\n", "\\begin{align*}\n", @@ -740,13 +762,13 @@ " & x_j \\in \\{0, 1\\} & \\forall j \\in \\{1,\\ldots,m\\}\n", "\\end{align*}\n", "$$" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Random instance generator\n", "\n", @@ -756,14 +778,14 @@ "[SetCoverGenerator]: ../../api/problems/#miplearn.problems.setcover.SetCoverGenerator\n", "\n", "### Example" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 5, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -778,8 +800,11 @@ "costs [1044.58 850.13 1014.5 944.83 697.9 971.87 213.49 220.98 70.23\n", " 425.33]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 5 rows, 10 columns and 28 nonzeros\n", "Model fingerprint: 0x4ee91388\n", "Variable types: 0 continuous, 10 integer (10 binary)\n", @@ -832,11 +857,8 @@ "\n", "# Build and optimize model\n", "model = build_setpack_model(data[0])\n", - "model.optimize()" - ], - "metadata": { - "collapsed": false - } + "model.optimize()\n" + ] }, { "cell_type": "markdown", @@ -854,6 +876,9 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "$$\n", "\\begin{align*}\n", @@ -863,13 +888,13 @@ "\\end{align*}\n", "$$\n", "where $\\mathcal{C}$ is the set of cliques in $G$. We recall that a clique is a subset of vertices in which every pair of vertices is adjacent." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "### Random instance generator\n", @@ -881,14 +906,11 @@ "If `fix_graph=True`, then all generated instances have the same random graph. 
For each instance, the weights are decided by sampling `w`, as described above.\n", "\n", "### Example" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 6, "id": "0f996e99-0ec9-472b-be8a-30c9b8556931", "metadata": {}, "outputs": [ @@ -900,8 +922,11 @@ "weights[0] [37.45 95.07 73.2 59.87 15.6 15.6 5.81 86.62 60.11 70.81]\n", "weights[1] [ 2.06 96.99 83.24 21.23 18.18 18.34 30.42 52.48 43.19 29.12]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 10 rows, 10 columns and 24 nonzeros\n", "Model fingerprint: 0xf4c21689\n", "Variable types: 0 continuous, 10 integer (10 binary)\n", @@ -923,7 +948,7 @@ "\n", " 0 0 infeasible 0 -219.14000 -219.14000 0.00% - 0s\n", "\n", - "Explored 1 nodes (4 simplex iterations) in 0.00 seconds (0.00 work units)\n", + "Explored 1 nodes (4 simplex iterations) in 0.01 seconds (0.00 work units)\n", "Thread count was 32 (of 32 available processors)\n", "\n", "Solution count 1: -219.14 \n", @@ -938,7 +963,10 @@ "import random\n", "import numpy as np\n", "from scipy.stats import uniform, randint\n", - "from miplearn.problems.stab import MaxWeightStableSetGenerator, build_stab_model\n", + "from miplearn.problems.stab import (\n", + " MaxWeightStableSetGenerator,\n", + " build_stab_model_gurobipy,\n", + ")\n", "\n", "# Set random seed to make example reproducible\n", "random.seed(42)\n", @@ -960,8 +988,8 @@ "print()\n", "\n", "# Load and optimize the first instance\n", - "model = build_stab_model(data[0])\n", - "model.optimize()" + "model = build_stab_model_gurobipy(data[0])\n", + "model.optimize()\n" ] }, { @@ -976,17 +1004,20 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Formulation\n", "\n", "Let $G=(V,E)$ be a simple undirected graph. For each edge $e \\in E$, let $d_e$ be its weight (or distance) and let $x_e$ be a binary decision variable which equals one if $e$ is included in the route. The problem is formulated as:" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "$$\n", "\\begin{align*}\n", @@ -999,13 +1030,13 @@ "\\end{align*}\n", "$$\n", "where $\\delta(v)$ denotes the set of edges adjacent to vertex $v$, and $\\delta(S)$ denotes the set of edges that have one extremity in $S$ and one in $V \\setminus S$. Because of its exponential size, we enforce the second set of inequalities as lazy constraints." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Random instance generator\n", "\n", @@ -1018,23 +1049,23 @@ "If `fix_cities=True`, then the list of cities is kept the same for all generated instances. The $\\gamma$ values, however, and therefore also the distances, are still different. By default, all distances $d_{ij}$ are rounded to the nearest integer. 
If `round=False` is provided, this rounding will be disabled.\n", "\n", "[TravelingSalesmanGenerator]: ../../api/problems/#miplearn.problems.tsp.TravelingSalesmanGenerator" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [ - "### Example" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "### Example" + ] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 7, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -1064,8 +1095,11 @@ " [ 668. 446. 317. 648. 469. 752. 394. 286. 274. 0.]]\n", "\n", "Set parameter LazyConstraints to value 1\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 10 rows, 45 columns and 90 nonzeros\n", "Model fingerprint: 0x719675e5\n", "Variable types: 0 continuous, 45 integer (45 binary)\n", @@ -1096,7 +1130,7 @@ "Optimal solution found (tolerance 1.00e-04)\n", "Best objective 2.921000000000e+03, best bound 2.921000000000e+03, gap 0.0000%\n", "\n", - "User-callback calls 100, time in user-callback 0.00 sec\n" + "User-callback calls 106, time in user-callback 0.00 sec\n" ] } ], @@ -1128,11 +1162,8 @@ "\n", "# Load and optimize the first instance\n", "model = build_tsp_model(data[0])\n", - "model.optimize()" - ], - "metadata": { - "collapsed": false - } + "model.optimize()\n" + ] }, { "cell_type": "markdown", @@ -1146,6 +1177,9 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "
\n", @@ -1157,13 +1191,13 @@ "### Formulation\n", "\n", "Let $T$ be the number of time steps, $G$ be the number of generation units, and let $D_t$ be the power demand (in MW) at time $t$. For each generating unit $g$, let $P^\\max_g$ and $P^\\min_g$ be the maximum and minimum amount of power the unit is able to produce when switched on; let $L_g$ and $l_g$ be the minimum up- and down-time for unit $g$; let $C^\\text{fixed}$ be the cost to keep unit $g$ on for one time step, regardless of its power output level; let $C^\\text{start}$ be the cost to switch unit $g$ on; and let $C^\\text{var}$ be the cost for generator $g$ to produce 1 MW of power. In this formulation, we assume linear production costs. For each generator $g$ and time $t$, let $x_{gt}$ be a binary variable which equals one if unit $g$ is on at time $t$, let $w_{gt}$ be a binary variable which equals one if unit $g$ switches from being off at time $t-1$ to being on at time $t$, and let $p_{gt}$ be a continuous variable which indicates the amount of power generated. The formulation is given by:" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "$$\n", @@ -1192,13 +1226,13 @@ " & \\forall g, t.\n", "\\end{align*}\n", "$$" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "The first set of inequalities enforces minimum up-time constraints: if unit $g$ is down at time $t$, then it cannot start up during the previous $L_g$ time steps. The second set of inequalities enforces minimum down-time constraints, and is symmetrical to the previous one. The third set ensures that if unit $g$ starts up at time $t$, then the start up variable must be one. The fourth set ensures that demand is satisfied at each time period. The fifth and sixth sets enforce bounds to the quantity of power generated by each unit.\n", @@ -1208,13 +1242,13 @@ "\n", "- *Bendotti, P., Fouilhoux, P. & Rottner, C.* **The min-up/min-down unit commitment polytope.** J Comb Optim 36, 1024-1058 (2018). https://doi.org/10.1007/s10878-018-0273-y\n", "
" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "### Random instance generator\n", @@ -1226,23 +1260,23 @@ "After parameters for the units have been generated, the class then generates a periodic demand curve, with a peak every 12 time steps, in the range $(0.4C, 0.8C)$, where $C$ is the sum of all units' maximum power output. Finally, all costs and demand values are perturbed by random scaling factors independently sampled from the distributions `cost_jitter` and `demand_jitter`, respectively.\n", "\n", "If `fix_units=True`, then the list of generators (with their respective parameters) is kept the same for all generated instances. If `cost_jitter` and `demand_jitter` are provided, the instances will still have slightly different costs and demands." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", - "source": [ - "### Example" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "### Example" + ] }, { "cell_type": "code", "execution_count": 8, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -1274,8 +1308,11 @@ " 828.28 775.18 834.99 959.76 865.72 1193.52 1058.92 985.19 893.92\n", " 962.16 781.88 723.15 639.04 602.4 787.02]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", - "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", + "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 578 rows, 360 columns and 2128 nonzeros\n", "Model fingerprint: 0x4dc1c661\n", "Variable types: 120 continuous, 240 integer (240 binary)\n", @@ -1285,22 +1322,22 @@ " Bounds range [1e+00, 1e+00]\n", " RHS range [1e+00, 1e+03]\n", "Presolve removed 244 rows and 131 columns\n", - "Presolve time: 0.01s\n", + "Presolve time: 0.02s\n", "Presolved: 334 rows, 229 columns, 842 nonzeros\n", "Variable types: 116 continuous, 113 integer (113 binary)\n", - "Found heuristic solution: objective 441426.66550\n", + "Found heuristic solution: objective 440662.46430\n", "Found heuristic solution: objective 429461.97680\n", "Found heuristic solution: objective 374043.64040\n", "\n", - "Root relaxation: objective 3.361348e+05, 139 iterations, 0.00 seconds (0.00 work units)\n", + "Root relaxation: objective 3.361348e+05, 142 iterations, 0.00 seconds (0.00 work units)\n", "\n", " Nodes | Current Node | Objective Bounds | Work\n", " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", "\n", " 0 0 336134.820 0 18 374043.640 336134.820 10.1% - 0s\n", "H 0 0 368600.14450 336134.820 8.81% - 0s\n", - "H 0 0 364721.76610 364721.766 0.00% - 0s\n", - " 0 0 - 0 364721.766 364721.766 0.00% - 0s\n", + "H 0 0 364721.76610 336134.820 7.84% - 0s\n", + " 0 0 cutoff 0 364721.766 364721.766 0.00% - 0s\n", "\n", "Cutting planes:\n", " Gomory: 3\n", @@ -1312,10 +1349,10 @@ " RLT: 1\n", " Relax-and-lift: 7\n", "\n", - "Explored 1 nodes (232 simplex iterations) in 0.04 seconds (0.02 work units)\n", - "Thread count was 12 (of 12 available processors)\n", + "Explored 1 nodes (234 simplex iterations) in 0.04 seconds (0.02 work units)\n", + "Thread count was 32 (of 32 available processors)\n", "\n", - "Solution count 5: 364722 368600 374044 ... 441427\n", + "Solution count 5: 364722 368600 374044 ... 
440662\n", "\n", "Optimal solution found (tolerance 1.00e-04)\n", "Best objective 3.647217661000e+05, best bound 3.647217661000e+05, gap 0.0000%\n" @@ -1363,11 +1400,8 @@ "\n", "# Load and optimize the first instance\n", "model = build_uc_model(data[0])\n", - "model.optimize()" - ], - "metadata": { - "collapsed": false - } + "model.optimize()\n" + ] }, { "cell_type": "markdown", @@ -1381,18 +1415,21 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "\n", "### Formulation\n", "\n", "Let $G=(V,E)$ be a simple graph. For each vertex $v \\in V$, let $w_g$ be its weight, and let $x_v$ be a binary decision variable which equals one if $v$ is included in the cover. The mixed-integer linear formulation for the problem is given by:" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ " $$\n", "\\begin{align*}\n", @@ -1404,13 +1441,13 @@ " & \\forall \\{i,j\\} \\in E.\n", "\\end{align*}\n", "$$" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "### Random instance generator\n", "\n", @@ -1420,10 +1457,7 @@ "[MaxWeightStableSetGenerator]: ../../api/problems/#miplearn.problems.stab.MaxWeightStableSetGenerator\n", "\n", "### Example" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", @@ -1439,8 +1473,11 @@ "weights[0] [37.45 95.07 73.2 59.87 15.6 15.6 5.81 86.62 60.11 70.81]\n", "weights[1] [ 2.06 96.99 83.24 21.23 18.18 18.34 30.42 52.48 43.19 29.12]\n", "\n", - "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", - "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", + "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", "Optimize a model with 15 rows, 10 columns and 30 nonzeros\n", "Model fingerprint: 0x2d2d1390\n", "Variable types: 0 continuous, 10 integer (10 binary)\n", @@ -1463,7 +1500,7 @@ " 0 0 infeasible 0 301.00000 301.00000 0.00% - 0s\n", "\n", "Explored 1 nodes (8 simplex iterations) in 0.01 seconds (0.00 work units)\n", - "Thread count was 12 (of 12 available processors)\n", + "Thread count was 32 (of 32 available processors)\n", "\n", "Solution count 1: 301 \n", "\n", @@ -1476,7 +1513,10 @@ "import random\n", "import numpy as np\n", "from scipy.stats import uniform, randint\n", - "from miplearn.problems.vertexcover import MinWeightVertexCoverGenerator, build_vertexcover_model\n", + "from miplearn.problems.vertexcover import (\n", + " MinWeightVertexCoverGenerator,\n", + " build_vertexcover_model,\n", + ")\n", "\n", "# Set random seed to make example reproducible\n", "random.seed(42)\n", @@ -1505,11 +1545,11 @@ { "cell_type": "code", "execution_count": null, - "outputs": [], - "source": [], "metadata": { "collapsed": false - } + }, + "outputs": [], + "source": [] } ], "metadata": { @@ -1528,7 +1568,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.13" + "version": "3.9.12" } }, "nbformat": 4, diff --git a/0.3/_sources/guide/solvers.ipynb.txt b/0.3/_sources/guide/solvers.ipynb.txt index 40d1709..b78fcc4 100644 --- a/0.3/_sources/guide/solvers.ipynb.txt +++ b/0.3/_sources/guide/solvers.ipynb.txt @@ -14,12 +14,28 @@ }, { "cell_type": "code", - "execution_count": 3, + 
"execution_count": 1, + "id": "92b09b98", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/axavier/Software/anaconda3/envs/miplearn/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ + "Restricted license - for non-production use only - expires 2023-10-25\n", "Gurobi Optimizer version 9.5.2 build v9.5.2rc0 (linux64)\n", "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", "Optimize a model with 10 rows, 45 columns and 90 nonzeros\n", @@ -67,7 +83,7 @@ "Cutting planes:\n", " Lazy constraints: 3\n", "\n", - "Explored 1 nodes (15 simplex iterations) in 0.02 seconds (0.00 work units)\n", + "Explored 1 nodes (15 simplex iterations) in 0.01 seconds (0.00 work units)\n", "Thread count was 12 (of 12 available processors)\n", "\n", "Solution count 1: 2796 \n", @@ -148,7 +164,7 @@ "test_data = all_data[40:]\n", "\n", "# Collect training data\n", - "bc = BasicCollector(time_limit_sec=3600)\n", + "bc = BasicCollector()\n", "bc.collect(train_data, build_tsp_model, n_jobs=4)\n", "\n", "# Build learning solver\n", @@ -172,14 +188,11 @@ "\n", "# Solve a test instance\n", "solver.optimize(test_data[0], build_tsp_model);" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "id": "e27d2cbd-5341-461d-bbc1-8131aee8d949", "metadata": {}, "outputs": [], @@ -188,7 +201,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/0.3/_sources/index.rst.txt b/0.3/_sources/index.rst.txt index 92c13f8..2a77d95 100644 --- a/0.3/_sources/index.rst.txt +++ b/0.3/_sources/index.rst.txt @@ -8,6 +8,15 @@ Unlike pure ML methods, MIPLearn is not only able to find high-quality solutions Contents -------- +.. toctree:: + :maxdepth: 10 + :caption: Tutorials + :numbered: 2 + + tutorials/getting-started-pyomo + tutorials/getting-started-gurobipy + tutorials/getting-started-jump + .. toctree:: :maxdepth: 10 :caption: User Guide @@ -21,7 +30,7 @@ Contents .. toctree:: :maxdepth: 10 - :caption: API Reference + :caption: Python API Reference :numbered: 2 api/problems diff --git a/0.3/_sources/tutorials/getting-started-gurobipy.ipynb.txt b/0.3/_sources/tutorials/getting-started-gurobipy.ipynb.txt new file mode 100644 index 0000000..47d51dd --- /dev/null +++ b/0.3/_sources/tutorials/getting-started-gurobipy.ipynb.txt @@ -0,0 +1,849 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6b8983b1", + "metadata": { + "tags": [] + }, + "source": [ + "# Getting started (Gurobipy)\n", + "\n", + "## Introduction\n", + "\n", + "**MIPLearn** is an open source framework that uses machine learning (ML) to accelerate the performance of mixed-integer programming solvers (e.g. Gurobi, CPLEX, XPRESS). In this tutorial, we will:\n", + "\n", + "1. Install the Python/Gurobipy version of MIPLearn\n", + "2. Model a simple optimization problem using Gurobipy\n", + "3. Generate training data and train the ML models\n", + "4. Use the ML models together Gurobi to solve new instances\n", + "\n", + "
\n", + "Note\n", + " \n", + "The Python/Gurobipy version of MIPLearn is only compatible with the Gurobi Optimizer. For broader solver compatibility, see the Python/Pyomo and Julia/JuMP versions of the package.\n", + "
\n", + "\n", + "
\n", + "Warning\n", + " \n", + "MIPLearn is still in early development stage. If run into any bugs or issues, please submit a bug report in our GitHub repository. Comments, suggestions and pull requests are also very welcome!\n", + " \n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "id": "02f0a927", + "metadata": {}, + "source": [ + "## Installation\n", + "\n", + "MIPLearn is available in two versions:\n", + "\n", + "- Python version, compatible with the Pyomo and Gurobipy modeling languages,\n", + "- Julia version, compatible with the JuMP modeling language.\n", + "\n", + "In this tutorial, we will demonstrate how to use and install the Python/Gurobipy version of the package. The first step is to install Python 3.8+ in your computer. See the [official Python website for more instructions](https://www.python.org/downloads/). After Python is installed, we proceed to install MIPLearn using `pip`:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cd8a69c1", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:18:02.381829278Z", + "start_time": "2023-06-06T20:18:02.381532300Z" + } + }, + "outputs": [], + "source": [ + "# !pip install MIPLearn==0.3.0" + ] + }, + { + "cell_type": "markdown", + "id": "e8274543", + "metadata": {}, + "source": [ + "In addition to MIPLearn itself, we will also install Gurobi 10.0, a state-of-the-art commercial MILP solver. This step also install a demo license for Gurobi, which should able to solve the small optimization problems in this tutorial. A license is required for solving larger-scale problems." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "dcc8756c", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:18:15.537811992Z", + "start_time": "2023-06-06T20:18:13.449177860Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: gurobipy<10.1,>=10 in /home/axavier/Software/anaconda3/envs/miplearn/lib/python3.8/site-packages (10.0.1)\n" + ] + } + ], + "source": [ + "!pip install 'gurobipy>=10,<10.1'" + ] + }, + { + "cell_type": "markdown", + "id": "a14e4550", + "metadata": {}, + "source": [ + "
\n", + " \n", + "Note\n", + " \n", + "In the code above, we install specific version of all packages to ensure that this tutorial keeps running in the future, even when newer (and possibly incompatible) versions of the packages are released. This is usually a recommended practice for all Python projects.\n", + " \n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "16b86823", + "metadata": {}, + "source": [ + "## Modeling a simple optimization problem\n", + "\n", + "To illustrate how can MIPLearn be used, we will model and solve a small optimization problem related to power systems optimization. The problem we discuss below is a simplification of the **unit commitment problem,** a practical optimization problem solved daily by electric grid operators around the world. \n", + "\n", + "Suppose that a utility company needs to decide which electrical generators should be online at each hour of the day, as well as how much power should each generator produce. More specifically, assume that the company owns $n$ generators, denoted by $g_1, \\ldots, g_n$. Each generator can either be online or offline. An online generator $g_i$ can produce between $p^\\text{min}_i$ to $p^\\text{max}_i$ megawatts of power, and it costs the company $c^\\text{fix}_i + c^\\text{var}_i y_i$, where $y_i$ is the amount of power produced. An offline generator produces nothing and costs nothing. The total amount of power to be produced needs to be exactly equal to the total demand $d$ (in megawatts).\n", + "\n", + "This simple problem can be modeled as a *mixed-integer linear optimization* problem as follows. For each generator $g_i$, let $x_i \\in \\{0,1\\}$ be a decision variable indicating whether $g_i$ is online, and let $y_i \\geq 0$ be a decision variable indicating how much power does $g_i$ produce. The problem is then given by:" + ] + }, + { + "cell_type": "markdown", + "id": "f12c3702", + "metadata": {}, + "source": [ + "$$\n", + "\\begin{align}\n", + "\\text{minimize } \\quad & \\sum_{i=1}^n \\left( c^\\text{fix}_i x_i + c^\\text{var}_i y_i \\right) \\\\\n", + "\\text{subject to } \\quad & y_i \\leq p^\\text{max}_i x_i & i=1,\\ldots,n \\\\\n", + "& y_i \\geq p^\\text{min}_i x_i & i=1,\\ldots,n \\\\\n", + "& \\sum_{i=1}^n y_i = d \\\\\n", + "& x_i \\in \\{0,1\\} & i=1,\\ldots,n \\\\\n", + "& y_i \\geq 0 & i=1,\\ldots,n\n", + "\\end{align}\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "id": "be3989ed", + "metadata": {}, + "source": [ + "
\n", + "\n", + "Note\n", + "\n", + "We use a simplified version of the unit commitment problem in this tutorial just to make it easier to follow. MIPLearn can also handle realistic, large-scale versions of this problem.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "a5fd33f6", + "metadata": {}, + "source": [ + "Next, let us convert this abstract mathematical formulation into a concrete optimization model, using Python and Pyomo. We start by defining a data class `UnitCommitmentData`, which holds all the input data." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "22a67170-10b4-43d3-8708-014d91141e73", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:18:25.442346786Z", + "start_time": "2023-06-06T20:18:25.329017476Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", + "import numpy as np\n", + "\n", + "\n", + "@dataclass\n", + "class UnitCommitmentData:\n", + " demand: float\n", + " pmin: List[float]\n", + " pmax: List[float]\n", + " cfix: List[float]\n", + " cvar: List[float]" + ] + }, + { + "cell_type": "markdown", + "id": "29f55efa-0751-465a-9b0a-a821d46a3d40", + "metadata": {}, + "source": [ + "Next, we write a `build_uc_model` function, which converts the input data into a concrete Pyomo model. The function accepts `UnitCommitmentData`, the data structure we previously defined, or the path to a compressed pickle file containing this data." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2f67032f-0d74-4317-b45c-19da0ec859e9", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:48:05.953902842Z", + "start_time": "2023-06-06T20:48:05.909747925Z" + } + }, + "outputs": [], + "source": [ + "import gurobipy as gp\n", + "from gurobipy import GRB, quicksum\n", + "from typing import Union\n", + "from miplearn.io import read_pkl_gz\n", + "from miplearn.solvers.gurobi import GurobiModel\n", + "\n", + "def build_uc_model(data: Union[str, UnitCommitmentData]) -> GurobiModel:\n", + " if isinstance(data, str):\n", + " data = read_pkl_gz(data)\n", + "\n", + " model = gp.Model()\n", + " n = len(data.pmin)\n", + " x = model._x = model.addVars(n, vtype=GRB.BINARY, name=\"x\")\n", + " y = model._y = model.addVars(n, name=\"y\")\n", + " model.setObjective(\n", + " quicksum(\n", + " data.cfix[i] * x[i] + data.cvar[i] * y[i] for i in range(n)\n", + " )\n", + " )\n", + " model.addConstrs(y[i] <= data.pmax[i] * x[i] for i in range(n))\n", + " model.addConstrs(y[i] >= data.pmin[i] * x[i] for i in range(n))\n", + " model.addConstr(quicksum(y[i] for i in range(n)) == data.demand)\n", + " return GurobiModel(model)" + ] + }, + { + "cell_type": "markdown", + "id": "c22714a3", + "metadata": {}, + "source": [ + "At this point, we can already use Pyomo and any mixed-integer linear programming solver to find optimal solutions to any instance of this problem. 
To illustrate this, let us solve a small instance with three generators:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2a896f47", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:14.266758244Z", + "start_time": "2023-06-06T20:49:14.223514806Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Restricted license - for non-production use only - expires 2024-10-28\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 7 rows, 6 columns and 15 nonzeros\n", + "Model fingerprint: 0x58dfdd53\n", + "Variable types: 3 continuous, 3 integer (3 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 7e+01]\n", + " Objective range [2e+00, 7e+02]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [1e+02, 1e+02]\n", + "Presolve removed 2 rows and 1 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 5 rows, 5 columns, 13 nonzeros\n", + "Variable types: 0 continuous, 5 integer (3 binary)\n", + "Found heuristic solution: objective 1400.0000000\n", + "\n", + "Root relaxation: objective 1.035000e+03, 3 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 1035.00000 0 1 1400.00000 1035.00000 26.1% - 0s\n", + " 0 0 1105.71429 0 1 1400.00000 1105.71429 21.0% - 0s\n", + "* 0 0 0 1320.0000000 1320.00000 0.00% - 0s\n", + "\n", + "Explored 1 nodes (5 simplex iterations) in 0.01 seconds (0.00 work units)\n", + "Thread count was 12 (of 12 available processors)\n", + "\n", + "Solution count 2: 1320 1400 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 1.320000000000e+03, best bound 1.320000000000e+03, gap 0.0000%\n", + "obj = 1320.0\n", + "x = [-0.0, 1.0, 1.0]\n", + "y = [0.0, 60.0, 40.0]\n" + ] + } + ], + "source": [ + "model = build_uc_model(\n", + " UnitCommitmentData(\n", + " demand=100.0,\n", + " pmin=[10, 20, 30],\n", + " pmax=[50, 60, 70],\n", + " cfix=[700, 600, 500],\n", + " cvar=[1.5, 2.0, 2.5],\n", + " )\n", + ")\n", + "\n", + "model.optimize()\n", + "print(\"obj =\", model.inner.objVal)\n", + "print(\"x =\", [model.inner._x[i].x for i in range(3)])\n", + "print(\"y =\", [model.inner._y[i].x for i in range(3)])" + ] + }, + { + "cell_type": "markdown", + "id": "41b03bbc", + "metadata": {}, + "source": [ + "Running the code above, we found that the optimal solution for our small problem instance costs \\$1320. It is achieve by keeping generators 2 and 3 online and producing, respectively, 60 MW and 40 MW of power." + ] + }, + { + "cell_type": "markdown", + "id": "01f576e1-1790-425e-9e5c-9fa07b6f4c26", + "metadata": {}, + "source": [ + "
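As a quick sanity check of the reported objective, we can recompute it by hand from the cost data of the instance above and the solution printed by the solver:

```python
# Generators 2 and 3 are online (x = [0, 1, 1]) and produce 60 MW and 40 MW.
cfix = [700, 600, 500]
cvar = [1.5, 2.0, 2.5]
x = [0, 1, 1]
y = [0.0, 60.0, 40.0]
obj = sum(cfix[i] * x[i] + cvar[i] * y[i] for i in range(3))
print(obj)  # 600 + 500 + 2.0 * 60 + 2.5 * 40 = 1320.0
```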
\n", + " \n", + "Note\n", + "\n", + "- In the example above, `GurobiModel` is just a thin wrapper around a standard Gurobi model. This wrapper allows MIPLearn to be solver- and modeling-language-agnostic. The wrapper provides only a few basic methods, such as `optimize`. For more control, and to query the solution, the original Gurobi model can be accessed through `model.inner`, as illustrated above.\n", + "- To ensure training data consistency, MIPLearn requires all decision variables to have names.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "cf60c1dd", + "metadata": {}, + "source": [ + "## Generating training data\n", + "\n", + "Although Gurobi could solve the small example above in a fraction of a second, it gets slower for larger and more complex versions of the problem. If this is a problem that needs to be solved frequently, as it is often the case in practice, it could make sense to spend some time upfront generating a **trained** solver, which can optimize new instances (similar to the ones it was trained on) faster.\n", + "\n", + "In the following, we will use MIPLearn to train machine learning models that is able to predict the optimal solution for instances that follow a given probability distribution, then it will provide this predicted solution to Gurobi as a warm start. Before we can train the model, we need to collect training data by solving a large number of instances. In real-world situations, we may construct these training instances based on historical data. In this tutorial, we will construct them using a random instance generator:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5eb09fab", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:22.758192368Z", + "start_time": "2023-06-06T20:49:22.724784572Z" + } + }, + "outputs": [], + "source": [ + "from scipy.stats import uniform\n", + "from typing import List\n", + "import random\n", + "\n", + "\n", + "def random_uc_data(samples: int, n: int, seed: int = 42) -> List[UnitCommitmentData]:\n", + " random.seed(seed)\n", + " np.random.seed(seed)\n", + " pmin = uniform(loc=100_000.0, scale=400_000.0).rvs(n)\n", + " pmax = pmin * uniform(loc=2.0, scale=2.5).rvs(n)\n", + " cfix = pmin * uniform(loc=100.0, scale=25.0).rvs(n)\n", + " cvar = uniform(loc=1.25, scale=0.25).rvs(n)\n", + " return [\n", + " UnitCommitmentData(\n", + " demand=pmax.sum() * uniform(loc=0.5, scale=0.25).rvs(),\n", + " pmin=pmin,\n", + " pmax=pmax,\n", + " cfix=cfix,\n", + " cvar=cvar,\n", + " )\n", + " for _ in range(samples)\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "id": "3a03a7ac", + "metadata": {}, + "source": [ + "In this example, for simplicity, only the demands change from one instance to the next. We could also have randomized the costs, production limits or even the number of units. The more randomization we have in the training data, however, the more challenging it is for the machine learning models to learn solution patterns.\n", + "\n", + "Now we generate 500 instances of this problem, each one with 50 generators, and we use 450 of these instances for training. After generating the instances, we write them to individual files. MIPLearn uses files during the training process because, for large-scale optimization problems, it is often impractical to hold in memory the entire training data, as well as the concrete Pyomo models. Files also make it much easier to solve multiple instances simultaneously, potentially on multiple machines. The code below generates the files `uc/train/00000.pkl.gz`, `uc/train/00001.pkl.gz`, etc., which contain the input data in compressed (gzipped) pickle format." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "6156752c", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:24.811192929Z", + "start_time": "2023-06-06T20:49:24.575639142Z" + } + }, + "outputs": [], + "source": [ + "from miplearn.io import write_pkl_gz\n", + "\n", + "data = random_uc_data(samples=500, n=500)\n", + "train_data = write_pkl_gz(data[0:450], \"uc/train\")\n", + "test_data = write_pkl_gz(data[450:500], \"uc/test\")" + ] + }, + { + "cell_type": "markdown", + "id": "b17af877", + "metadata": {}, + "source": [ + "Finally, we use `BasicCollector` to collect the optimal solutions and other useful training data for all training instances. The data is stored in HDF5 files `uc/train/00000.h5`, `uc/train/00001.h5`, etc. The optimization models are also exported to compressed MPS files `uc/train/00000.mps.gz`, `uc/train/00001.mps.gz`, etc." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "7623f002", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:34.936729253Z", + "start_time": "2023-06-06T20:49:25.936126612Z" + } + }, + "outputs": [], + "source": [ + "from miplearn.collectors.basic import BasicCollector\n", + "\n", + "bc = BasicCollector()\n", + "bc.collect(train_data, build_uc_model, n_jobs=4)" + ] + }, + { + "cell_type": "markdown", + "id": "c42b1be1-9723-4827-82d8-974afa51ef9f", + "metadata": {}, + "source": [ + "## Training and solving test instances" + ] + }, + { + "cell_type": "markdown", + "id": "a33c6aa4-f0b8-4ccb-9935-01f7d7de2a1c", + "metadata": {}, + "source": [ + "With training data in hand, we can now design and train a machine learning model to accelerate solver performance. In this tutorial, for illustration purposes, we will use ML to generate a good warm start using $k$-nearest neighbors. More specifically, the strategy is to:\n", + "\n", + "1. Memorize the optimal solutions of all training instances;\n", + "2. Given a test instance, find the 25 most similar training instances, based on constraint right-hand sides;\n", + "3. Merge their optimal solutions into a single partial solution; specifically, only assign values to the binary variables that agree unanimously.\n", + "4. Provide this partial solution to the solver as a warm start.\n", + "\n", + "This simple strategy can be implemented as shown below, using `MemorizingPrimalComponent`. For more advanced strategies, and for the usage of more advanced classifiers, see the user guide." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "435f7bf8-4b09-4889-b1ec-b7b56e7d8ed2", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:38.997939600Z", + "start_time": "2023-06-06T20:49:38.968261432Z" + } + }, + "outputs": [], + "source": [ + "from sklearn.neighbors import KNeighborsClassifier\n", + "from miplearn.components.primal.actions import SetWarmStart\n", + "from miplearn.components.primal.mem import (\n", + " MemorizingPrimalComponent,\n", + " MergeTopSolutions,\n", + ")\n", + "from miplearn.extractors.fields import H5FieldsExtractor\n", + "\n", + "comp = MemorizingPrimalComponent(\n", + " clf=KNeighborsClassifier(n_neighbors=25),\n", + " extractor=H5FieldsExtractor(\n", + " instance_fields=[\"static_constr_rhs\"],\n", + " ),\n", + " constructor=MergeTopSolutions(25, [0.0, 1.0]),\n", + " action=SetWarmStart(),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "9536e7e4-0b0d-49b0-bebd-4a848f839e94", + "metadata": {}, + "source": [ + "Having defined the ML strategy, we next construct `LearningSolver`, train the ML component and optimize one of the test instances." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "9d13dd50-3dcf-4673-a757-6f44dcc0dedf", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:42.072345411Z", + "start_time": "2023-06-06T20:49:41.294040974Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0xa8b70287\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve removed 1000 rows and 500 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 1 rows, 500 columns, 500 nonzeros\n", + "\n", + "Iteration Objective Primal Inf. Dual Inf. 
Time\n", + " 0 6.6166537e+09 5.648803e+04 0.000000e+00 0s\n", + " 1 8.2906219e+09 0.000000e+00 0.000000e+00 0s\n", + "\n", + "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n", + "Optimal objective 8.290621916e+09\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x4ccd7ae3\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "\n", + "User MIP start produced solution with objective 8.30129e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.29184e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.29146e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.29146e+09 (0.01s)\n", + "Loaded user MIP start with objective 8.29146e+09\n", + "\n", + "Presolve time: 0.00s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "\n", + "Root relaxation: objective 8.290622e+09, 512 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 8.2906e+09 0 1 8.2915e+09 8.2906e+09 0.01% - 0s\n", + "\n", + "Cutting planes:\n", + " Cover: 1\n", + " Flow cover: 2\n", + "\n", + "Explored 1 nodes (512 simplex iterations) in 0.07 seconds (0.01 work units)\n", + "Thread count was 12 (of 12 available processors)\n", + "\n", + "Solution count 3: 8.29146e+09 8.29184e+09 8.30129e+09 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 8.291459497797e+09, best bound 8.290645029670e+09, gap 0.0098%\n" + ] + } + ], + "source": [ + "from miplearn.solvers.learning import LearningSolver\n", + "\n", + "solver_ml = LearningSolver(components=[comp])\n", + "solver_ml.fit(train_data)\n", + "solver_ml.optimize(test_data[0], build_uc_model);" + ] + }, + { + "cell_type": "markdown", + "id": "61da6dad-7f56-4edb-aa26-c00eb5f946c0", + "metadata": {}, + "source": [ + "By examining the solve log above, specifically the line `Loaded user MIP start with objective...`, we can see that MIPLearn was able to construct an initial solution which turned out to be very close to the optimal solution to the problem. Now let us repeat the code above, but a solver which does not apply any ML strategies. Note that our previously-defined component is not provided." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2ff391ed-e855-4228-aa09-a7641d8c2893", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:49:44.012782276Z", + "start_time": "2023-06-06T20:49:43.813974362Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0xa8b70287\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve removed 1000 rows and 500 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 1 rows, 500 columns, 500 nonzeros\n", + "\n", + "Iteration Objective Primal Inf. Dual Inf. Time\n", + " 0 6.6166537e+09 5.648803e+04 0.000000e+00 0s\n", + " 1 8.2906219e+09 0.000000e+00 0.000000e+00 0s\n", + "\n", + "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n", + "Optimal objective 8.290621916e+09\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x4cbbf7c7\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve time: 0.00s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Found heuristic solution: objective 9.757128e+09\n", + "\n", + "Root relaxation: objective 8.290622e+09, 512 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 8.2906e+09 0 1 9.7571e+09 8.2906e+09 15.0% - 0s\n", + "H 0 0 8.298273e+09 8.2906e+09 0.09% - 0s\n", + " 0 0 8.2907e+09 0 4 8.2983e+09 8.2907e+09 0.09% - 0s\n", + " 0 0 8.2907e+09 0 1 8.2983e+09 8.2907e+09 0.09% - 0s\n", + " 0 0 8.2907e+09 0 4 8.2983e+09 8.2907e+09 0.09% - 0s\n", + "H 0 0 8.293980e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2907e+09 0 5 8.2940e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2907e+09 0 2 8.2940e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2908e+09 0 1 8.2940e+09 8.2908e+09 0.04% - 0s\n", + " 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n", + " 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n", + "H 0 0 8.291465e+09 8.2908e+09 0.01% - 0s\n", + "\n", + "Cutting planes:\n", + " Gomory: 2\n", + " MIR: 1\n", + "\n", + "Explored 1 nodes (1031 simplex iterations) in 0.07 seconds (0.03 work units)\n", + "Thread count was 12 (of 12 available processors)\n", + "\n", + "Solution count 4: 8.29147e+09 8.29398e+09 8.29827e+09 9.75713e+09 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 8.291465302389e+09, best bound 8.290781665333e+09, gap 0.0082%\n" + ] + } + ], + "source": [ + 
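"# Baseline for comparison: a LearningSolver with no ML components, so Gurobi receives no warm start.\n", +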
"solver_baseline = LearningSolver(components=[])\n", + "solver_baseline.fit(train_data)\n", + "solver_baseline.optimize(test_data[0], build_uc_model);" + ] + }, + { + "cell_type": "markdown", + "id": "b6d37b88-9fcc-43ee-ac1e-2a7b1e51a266", + "metadata": {}, + "source": [ + "In the log above, the `MIP start` line is missing, and Gurobi had to start with a significantly inferior initial solution. The solver was still able to find the optimal solution at the end, but it required using its own internal heuristic procedures. In this example, because we solve very small optimization problems, there was almost no difference in terms of running time, but the difference can be significant for larger problems." + ] + }, + { + "cell_type": "markdown", + "id": "eec97f06", + "metadata": { + "tags": [] + }, + "source": [ + "## Accessing the solution\n", + "\n", + "In the example above, we used `LearningSolver.solve` together with data files to solve both the training and the test instances. The optimal solutions were saved to HDF5 files in the train/test folders, and could be retrieved by reading theses files, but that is not very convenient. In the following example, we show how to build and solve a Pyomo model entirely in-memory, using our trained solver." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "67a6cd18", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:50:12.869892930Z", + "start_time": "2023-06-06T20:50:12.509410473Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x19042f12\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve removed 1000 rows and 500 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 1 rows, 500 columns, 500 nonzeros\n", + "\n", + "Iteration Objective Primal Inf. Dual Inf. 
Time\n", + " 0 6.5917580e+09 5.627453e+04 0.000000e+00 0s\n", + " 1 8.2535968e+09 0.000000e+00 0.000000e+00 0s\n", + "\n", + "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n", + "Optimal objective 8.253596777e+09\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x8ee64638\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "\n", + "User MIP start produced solution with objective 8.25814e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.25512e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.25459e+09 (0.04s)\n", + "User MIP start produced solution with objective 8.25459e+09 (0.04s)\n", + "Loaded user MIP start with objective 8.25459e+09\n", + "\n", + "Presolve time: 0.01s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "\n", + "Root relaxation: objective 8.253597e+09, 512 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 8.2536e+09 0 1 8.2546e+09 8.2536e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 3 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 1 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2538e+09 0 4 8.2546e+09 8.2538e+09 0.01% - 0s\n", + " 0 0 8.2538e+09 0 5 8.2546e+09 8.2538e+09 0.01% - 0s\n", + " 0 0 8.2538e+09 0 6 8.2546e+09 8.2538e+09 0.01% - 0s\n", + "\n", + "Cutting planes:\n", + " Cover: 1\n", + " MIR: 2\n", + " StrongCG: 1\n", + " Flow cover: 1\n", + "\n", + "Explored 1 nodes (575 simplex iterations) in 0.12 seconds (0.01 work units)\n", + "Thread count was 12 (of 12 available processors)\n", + "\n", + "Solution count 3: 8.25459e+09 8.25512e+09 8.25814e+09 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 8.254590409970e+09, best bound 8.253768093811e+09, gap 0.0100%\n", + "obj = 8254590409.969726\n", + "x = [1.0, 1.0, 0.0]\n", + "y = [935662.0949263407, 1604270.0218116897, 0.0]\n" + ] + } + ], + "source": [ + "data = random_uc_data(samples=1, n=500)[0]\n", + "model = build_uc_model(data)\n", + "solver_ml.optimize(model)\n", + "print(\"obj =\", model.inner.objVal)\n", + "print(\"x =\", [model.inner._x[i].x for i in range(3)])\n", + "print(\"y =\", [model.inner._y[i].x for i in range(3)])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5593d23a-83bd-4e16-8253-6300f5e3f63b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + } + }, + "nbformat": 4, + 
"nbformat_minor": 5 +} diff --git a/0.3/_sources/tutorials/getting-started-jump.ipynb.txt b/0.3/_sources/tutorials/getting-started-jump.ipynb.txt new file mode 100644 index 0000000..ec76ac2 --- /dev/null +++ b/0.3/_sources/tutorials/getting-started-jump.ipynb.txt @@ -0,0 +1,680 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6b8983b1", + "metadata": { + "tags": [] + }, + "source": [ + "# Getting started (JuMP)\n", + "\n", + "## Introduction\n", + "\n", + "**MIPLearn** is an open source framework that uses machine learning (ML) to accelerate the performance of mixed-integer programming solvers (e.g. Gurobi, CPLEX, XPRESS). In this tutorial, we will:\n", + "\n", + "1. Install the Julia/JuMP version of MIPLearn\n", + "2. Model a simple optimization problem using JuMP\n", + "3. Generate training data and train the ML models\n", + "4. Use the ML models together Gurobi to solve new instances\n", + "\n", + "
\n", + "Warning\n", + " \n", + "MIPLearn is still in early development stage. If run into any bugs or issues, please submit a bug report in our GitHub repository. Comments, suggestions and pull requests are also very welcome!\n", + " \n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "id": "02f0a927", + "metadata": {}, + "source": [ + "## Installation\n", + "\n", + "MIPLearn is available in two versions:\n", + "\n", + "- Python version, compatible with the Pyomo and Gurobipy modeling languages,\n", + "- Julia version, compatible with the JuMP modeling language.\n", + "\n", + "In this tutorial, we will demonstrate how to use and install the Python/Pyomo version of the package. The first step is to install Julia in your machine. See the [official Julia website for more instructions](https://julialang.org/downloads/). After Julia is installed, launch the Julia REPL, type `]` to enter package mode, then install MIPLearn:\n", + "\n", + "```\n", + "pkg> add MIPLearn@0.3\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "e8274543", + "metadata": {}, + "source": [ + "In addition to MIPLearn itself, we will also install:\n", + "\n", + "- the JuMP modeling language\n", + "- Gurobi, a state-of-the-art commercial MILP solver\n", + "- Distributions, to generate random data\n", + "- PyCall, to access ML model from Scikit-Learn\n", + "- Suppressor, to make the output cleaner\n", + "\n", + "```\n", + "pkg> add JuMP@1, Gurobi@1, Distributions@0.25, PyCall@1, Suppressor@0.2\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a14e4550", + "metadata": {}, + "source": [ + "
\n", + " \n", + "Note\n", + "\n", + "- If you do not have a Gurobi license available, you can also follow the tutorial by installing an open-source solver, such as `HiGHS`, and replacing `Gurobi.Optimizer` by `HiGHS.Optimizer` in all the code examples.\n", + "- In the code above, we install specific version of all packages to ensure that this tutorial keeps running in the future, even when newer (and possibly incompatible) versions of the packages are released. This is usually a recommended practice for all Julia projects.\n", + " \n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "16b86823", + "metadata": {}, + "source": [ + "## Modeling a simple optimization problem\n", + "\n", + "To illustrate how can MIPLearn be used, we will model and solve a small optimization problem related to power systems optimization. The problem we discuss below is a simplification of the **unit commitment problem,** a practical optimization problem solved daily by electric grid operators around the world. \n", + "\n", + "Suppose that a utility company needs to decide which electrical generators should be online at each hour of the day, as well as how much power should each generator produce. More specifically, assume that the company owns $n$ generators, denoted by $g_1, \\ldots, g_n$. Each generator can either be online or offline. An online generator $g_i$ can produce between $p^\\text{min}_i$ to $p^\\text{max}_i$ megawatts of power, and it costs the company $c^\\text{fix}_i + c^\\text{var}_i y_i$, where $y_i$ is the amount of power produced. An offline generator produces nothing and costs nothing. The total amount of power to be produced needs to be exactly equal to the total demand $d$ (in megawatts).\n", + "\n", + "This simple problem can be modeled as a *mixed-integer linear optimization* problem as follows. For each generator $g_i$, let $x_i \\in \\{0,1\\}$ be a decision variable indicating whether $g_i$ is online, and let $y_i \\geq 0$ be a decision variable indicating how much power does $g_i$ produce. The problem is then given by:" + ] + }, + { + "cell_type": "markdown", + "id": "f12c3702", + "metadata": {}, + "source": [ + "$$\n", + "\\begin{align}\n", + "\\text{minimize } \\quad & \\sum_{i=1}^n \\left( c^\\text{fix}_i x_i + c^\\text{var}_i y_i \\right) \\\\\n", + "\\text{subject to } \\quad & y_i \\leq p^\\text{max}_i x_i & i=1,\\ldots,n \\\\\n", + "& y_i \\geq p^\\text{min}_i x_i & i=1,\\ldots,n \\\\\n", + "& \\sum_{i=1}^n y_i = d \\\\\n", + "& x_i \\in \\{0,1\\} & i=1,\\ldots,n \\\\\n", + "& y_i \\geq 0 & i=1,\\ldots,n\n", + "\\end{align}\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "id": "be3989ed", + "metadata": {}, + "source": [ + "
\n", + "\n", + "Note\n", + "\n", + "We use a simplified version of the unit commitment problem in this tutorial just to make it easier to follow. MIPLearn can also handle realistic, large-scale versions of this problem.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "a5fd33f6", + "metadata": {}, + "source": [ + "Next, let us convert this abstract mathematical formulation into a concrete optimization model, using Julia and JuMP. We start by defining a data class `UnitCommitmentData`, which holds all the input data." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "c62ebff1-db40-45a1-9997-d121837f067b", + "metadata": {}, + "outputs": [], + "source": [ + "struct UnitCommitmentData\n", + " demand::Float64\n", + " pmin::Vector{Float64}\n", + " pmax::Vector{Float64}\n", + " cfix::Vector{Float64}\n", + " cvar::Vector{Float64}\n", + "end;" + ] + }, + { + "cell_type": "markdown", + "id": "29f55efa-0751-465a-9b0a-a821d46a3d40", + "metadata": {}, + "source": [ + "Next, we write a `build_uc_model` function, which converts the input data into a concrete JuMP model. The function accepts `UnitCommitmentData`, the data structure we previously defined, or the path to a JLD2 file containing this data." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "79ef7775-18ca-4dfa-b438-49860f762ad0", + "metadata": {}, + "outputs": [], + "source": [ + "using MIPLearn\n", + "using JuMP\n", + "using Gurobi\n", + "\n", + "function build_uc_model(data)\n", + " if data isa String\n", + " data = read_jld2(data)\n", + " end\n", + " model = Model(Gurobi.Optimizer)\n", + " G = 1:length(data.pmin)\n", + " @variable(model, x[G], Bin)\n", + " @variable(model, y[G] >= 0)\n", + " @objective(model, Min, sum(data.cfix[g] * x[g] + data.cvar[g] * y[g] for g in G))\n", + " @constraint(model, eq_max_power[g in G], y[g] <= data.pmax[g] * x[g])\n", + " @constraint(model, eq_min_power[g in G], y[g] >= data.pmin[g] * x[g])\n", + " @constraint(model, eq_demand, sum(y[g] for g in G) == data.demand)\n", + " return JumpModel(model)\n", + "end;" + ] + }, + { + "cell_type": "markdown", + "id": "c22714a3", + "metadata": {}, + "source": [ + "At this point, we can already use Gurobi to find optimal solutions to any instance of this problem. 
To illustrate this, let us solve a small instance with three generators:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "dd828d68-fd43-4d2a-a058-3e2628d99d9e", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:01:10.993801745Z", + "start_time": "2023-06-06T20:01:10.887580927Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", + "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", + "Optimize a model with 7 rows, 6 columns and 15 nonzeros\n", + "Model fingerprint: 0x55e33a07\n", + "Variable types: 3 continuous, 3 integer (3 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 7e+01]\n", + " Objective range [2e+00, 7e+02]\n", + " Bounds range [0e+00, 0e+00]\n", + " RHS range [1e+02, 1e+02]\n", + "Presolve removed 2 rows and 1 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 5 rows, 5 columns, 13 nonzeros\n", + "Variable types: 0 continuous, 5 integer (3 binary)\n", + "Found heuristic solution: objective 1400.0000000\n", + "\n", + "Root relaxation: objective 1.035000e+03, 3 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 1035.00000 0 1 1400.00000 1035.00000 26.1% - 0s\n", + " 0 0 1105.71429 0 1 1400.00000 1105.71429 21.0% - 0s\n", + "* 0 0 0 1320.0000000 1320.00000 0.00% - 0s\n", + "\n", + "Explored 1 nodes (5 simplex iterations) in 0.00 seconds (0.00 work units)\n", + "Thread count was 32 (of 32 available processors)\n", + "\n", + "Solution count 2: 1320 1400 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 1.320000000000e+03, best bound 1.320000000000e+03, gap 0.0000%\n", + "\n", + "User-callback calls 371, time in user-callback 0.00 sec\n", + "objective_value(model.inner) = 1320.0\n", + "Vector(value.(model.inner[:x])) = [-0.0, 1.0, 1.0]\n", + "Vector(value.(model.inner[:y])) = [0.0, 60.0, 40.0]\n" + ] + } + ], + "source": [ + "model = build_uc_model(\n", + " UnitCommitmentData(\n", + " 100.0, # demand\n", + " [10, 20, 30], # pmin\n", + " [50, 60, 70], # pmax\n", + " [700, 600, 500], # cfix\n", + " [1.5, 2.0, 2.5], # cvar\n", + " )\n", + ")\n", + "model.optimize()\n", + "@show objective_value(model.inner)\n", + "@show Vector(value.(model.inner[:x]))\n", + "@show Vector(value.(model.inner[:y]));" + ] + }, + { + "cell_type": "markdown", + "id": "41b03bbc", + "metadata": {}, + "source": [ + "Running the code above, we found that the optimal solution for our small problem instance costs \\$1320. It is achieve by keeping generators 2 and 3 online and producing, respectively, 60 MW and 40 MW of power." + ] + }, + { + "cell_type": "markdown", + "id": "01f576e1-1790-425e-9e5c-9fa07b6f4c26", + "metadata": {}, + "source": [ + "
\n", + " \n", + "Notes\n", + " \n", + "- In the example above, `JumpModel` is just a thin wrapper around a standard JuMP model. This wrapper allows MIPLearn to be solver- and modeling-language-agnostic. The wrapper provides only a few basic methods, such as `optimize`. For more control, and to query the solution, the original JuMP model can be accessed through `model.inner`, as illustrated above.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "cf60c1dd", + "metadata": {}, + "source": [ + "## Generating training data\n", + "\n", + "Although Gurobi could solve the small example above in a fraction of a second, it gets slower for larger and more complex versions of the problem. If this is a problem that needs to be solved frequently, as it is often the case in practice, it could make sense to spend some time upfront generating a **trained** solver, which can optimize new instances (similar to the ones it was trained on) faster.\n", + "\n", + "In the following, we will use MIPLearn to train machine learning models that is able to predict the optimal solution for instances that follow a given probability distribution, then it will provide this predicted solution to Gurobi as a warm start. Before we can train the model, we need to collect training data by solving a large number of instances. In real-world situations, we may construct these training instances based on historical data. In this tutorial, we will construct them using a random instance generator:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1326efd7-3869-4137-ab6b-df9cb609a7e0", + "metadata": {}, + "outputs": [], + "source": [ + "using Distributions\n", + "using Random\n", + "\n", + "function random_uc_data(; samples::Int, n::Int, seed::Int=42)::Vector\n", + " Random.seed!(seed)\n", + " pmin = rand(Uniform(100_000, 500_000), n)\n", + " pmax = pmin .* rand(Uniform(2, 2.5), n)\n", + " cfix = pmin .* rand(Uniform(100, 125), n)\n", + " cvar = rand(Uniform(1.25, 1.50), n)\n", + " return [\n", + " UnitCommitmentData(\n", + " sum(pmax) * rand(Uniform(0.5, 0.75)),\n", + " pmin,\n", + " pmax,\n", + " cfix,\n", + " cvar,\n", + " )\n", + " for _ in 1:samples\n", + " ]\n", + "end;" + ] + }, + { + "cell_type": "markdown", + "id": "3a03a7ac", + "metadata": {}, + "source": [ + "In this example, for simplicity, only the demands change from one instance to the next. We could also have randomized the costs, production limits or even the number of units. The more randomization we have in the training data, however, the more challenging it is for the machine learning models to learn solution patterns.\n", + "\n", + "Now we generate 500 instances of this problem, each one with 50 generators, and we use 450 of these instances for training. After generating the instances, we write them to individual files. MIPLearn uses files during the training process because, for large-scale optimization problems, it is often impractical to hold in memory the entire training data, as well as the concrete Pyomo models. Files also make it much easier to solve multiple instances simultaneously, potentially on multiple machines. The code below generates the files `uc/train/00001.jld2`, `uc/train/00002.jld2`, etc., which contain the input data in JLD2 format." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "6156752c", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:03:04.782830561Z", + "start_time": "2023-06-06T20:03:04.530421396Z" + } + }, + "outputs": [], + "source": [ + "data = random_uc_data(samples=500, n=500)\n", + "train_data = write_jld2(data[1:450], \"uc/train\")\n", + "test_data = write_jld2(data[451:500], \"uc/test\");" + ] + }, + { + "cell_type": "markdown", + "id": "b17af877", + "metadata": {}, + "source": [ + "Finally, we use `BasicCollector` to collect the optimal solutions and other useful training data for all training instances. 
The data is stored in HDF5 files `uc/train/00001.h5`, `uc/train/00002.h5`, etc. The optimization models are also exported to compressed MPS files `uc/train/00001.mps.gz`, `uc/train/00002.mps.gz`, etc." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7623f002", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:03:35.571497019Z", + "start_time": "2023-06-06T20:03:25.804104036Z" + } + }, + "outputs": [], + "source": [ + "using Suppressor\n", + "@suppress_out begin\n", + " bc = BasicCollector()\n", + " bc.collect(train_data, build_uc_model)\n", + "end" + ] + }, + { + "cell_type": "markdown", + "id": "c42b1be1-9723-4827-82d8-974afa51ef9f", + "metadata": {}, + "source": [ + "## Training and solving test instances" + ] + }, + { + "cell_type": "markdown", + "id": "a33c6aa4-f0b8-4ccb-9935-01f7d7de2a1c", + "metadata": {}, + "source": [ + "With training data in hand, we can now design and train a machine learning model to accelerate solver performance. In this tutorial, for illustration purposes, we will use ML to generate a good warm start using $k$-nearest neighbors. More specifically, the strategy is to:\n", + "\n", + "1. Memorize the optimal solutions of all training instances;\n", + "2. Given a test instance, find the 25 most similar training instances, based on constraint right-hand sides;\n", + "3. Merge their optimal solutions into a single partial solution; specifically, only assign values to the binary variables that agree unanimously.\n", + "4. Provide this partial solution to the solver as a warm start.\n", + "\n", + "This simple strategy can be implemented as shown below, using `MemorizingPrimalComponent`. For more advanced strategies, and for the usage of more advanced classifiers, see the user guide." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "435f7bf8-4b09-4889-b1ec-b7b56e7d8ed2", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:05:20.497772794Z", + "start_time": "2023-06-06T20:05:20.484821405Z" + } + }, + "outputs": [], + "source": [ + "# Load kNN classifier from Scikit-Learn\n", + "using PyCall\n", + "KNeighborsClassifier = pyimport(\"sklearn.neighbors\").KNeighborsClassifier\n", + "\n", + "# Build the MIPLearn component\n", + "comp = MemorizingPrimalComponent(\n", + " clf=KNeighborsClassifier(n_neighbors=25),\n", + " extractor=H5FieldsExtractor(\n", + " instance_fields=[\"static_constr_rhs\"],\n", + " ),\n", + " constructor=MergeTopSolutions(25, [0.0, 1.0]),\n", + " action=SetWarmStart(),\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "9536e7e4-0b0d-49b0-bebd-4a848f839e94", + "metadata": {}, + "source": [ + "Having defined the ML strategy, we next construct `LearningSolver`, train the ML component and optimize one of the test instances." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "9d13dd50-3dcf-4673-a757-6f44dcc0dedf", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:05:22.672002339Z", + "start_time": "2023-06-06T20:05:21.447466634Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", + "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0xd2378195\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 1e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [0e+00, 0e+00]\n", + " RHS range [2e+08, 2e+08]\n", + "\n", + "User MIP start produced solution with objective 1.02165e+10 (0.00s)\n", + "Loaded user MIP start with objective 1.02165e+10\n", + "\n", + "Presolve time: 0.00s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "\n", + "Root relaxation: objective 1.021568e+10, 510 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 1.0216e+10 0 1 1.0217e+10 1.0216e+10 0.01% - 0s\n", + "\n", + "Explored 1 nodes (510 simplex iterations) in 0.01 seconds (0.00 work units)\n", + "Thread count was 32 (of 32 available processors)\n", + "\n", + "Solution count 1: 1.02165e+10 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 1.021651058978e+10, best bound 1.021567971257e+10, gap 0.0081%\n", + "\n", + "User-callback calls 169, time in user-callback 0.00 sec\n" + ] + } + ], + "source": [ + "solver_ml = LearningSolver(components=[comp])\n", + "solver_ml.fit(train_data)\n", + "solver_ml.optimize(test_data[1], build_uc_model);" + ] + }, + { + "cell_type": "markdown", + "id": "61da6dad-7f56-4edb-aa26-c00eb5f946c0", + "metadata": {}, + "source": [ + "By examining the solve log above, specifically the line `Loaded user MIP start with objective...`, we can see that MIPLearn was able to construct an initial solution which turned out to be very close to the optimal solution to the problem. Now let us repeat the code above, but a solver which does not apply any ML strategies. Note that our previously-defined component is not provided." 
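, + "\n", + "(With an empty `components` list, `LearningSolver` passes no warm start to Gurobi, so the solver must find an initial feasible solution on its own.)"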
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "2ff391ed-e855-4228-aa09-a7641d8c2893", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:05:46.969575966Z", + "start_time": "2023-06-06T20:05:46.420803286Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", + "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0xb45c0594\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 1e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [0e+00, 0e+00]\n", + " RHS range [2e+08, 2e+08]\n", + "Presolve time: 0.00s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Found heuristic solution: objective 1.071463e+10\n", + "\n", + "Root relaxation: objective 1.021568e+10, 510 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 1.0216e+10 0 1 1.0715e+10 1.0216e+10 4.66% - 0s\n", + "H 0 0 1.025162e+10 1.0216e+10 0.35% - 0s\n", + " 0 0 1.0216e+10 0 1 1.0252e+10 1.0216e+10 0.35% - 0s\n", + "H 0 0 1.023090e+10 1.0216e+10 0.15% - 0s\n", + "H 0 0 1.022335e+10 1.0216e+10 0.07% - 0s\n", + "H 0 0 1.022281e+10 1.0216e+10 0.07% - 0s\n", + "H 0 0 1.021753e+10 1.0216e+10 0.02% - 0s\n", + "H 0 0 1.021752e+10 1.0216e+10 0.02% - 0s\n", + " 0 0 1.0216e+10 0 3 1.0218e+10 1.0216e+10 0.02% - 0s\n", + " 0 0 1.0216e+10 0 1 1.0218e+10 1.0216e+10 0.02% - 0s\n", + "H 0 0 1.021651e+10 1.0216e+10 0.01% - 0s\n", + "\n", + "Explored 1 nodes (764 simplex iterations) in 0.03 seconds (0.02 work units)\n", + "Thread count was 32 (of 32 available processors)\n", + "\n", + "Solution count 7: 1.02165e+10 1.02175e+10 1.02228e+10 ... 1.07146e+10\n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 1.021651058978e+10, best bound 1.021573363741e+10, gap 0.0076%\n", + "\n", + "User-callback calls 204, time in user-callback 0.00 sec\n" + ] + } + ], + "source": [ + "solver_baseline = LearningSolver(components=[])\n", + "solver_baseline.fit(train_data)\n", + "solver_baseline.optimize(test_data[1], build_uc_model);" + ] + }, + { + "cell_type": "markdown", + "id": "b6d37b88-9fcc-43ee-ac1e-2a7b1e51a266", + "metadata": {}, + "source": [ + "In the log above, the `MIP start` line is missing, and Gurobi had to start with a significantly inferior initial solution. The solver was still able to find the optimal solution at the end, but it required using its own internal heuristic procedures. In this example, because we solve very small optimization problems, there was almost no difference in terms of running time, but the difference can be significant for larger problems." + ] + }, + { + "cell_type": "markdown", + "id": "eec97f06", + "metadata": { + "tags": [] + }, + "source": [ + "## Accessing the solution\n", + "\n", + "In the example above, we used `LearningSolver.solve` together with data files to solve both the training and the test instances. 
The optimal solutions were saved to HDF5 files in the train/test folders, and could be retrieved by reading theses files, but that is not very convenient. In the following example, we show how to build and solve a JuMP model entirely in-memory, using our trained solver." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "67a6cd18", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:06:26.913448568Z", + "start_time": "2023-06-06T20:06:26.169047914Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: AMD Ryzen 9 7950X 16-Core Processor, instruction set [SSE2|AVX|AVX2|AVX512]\n", + "Thread count: 16 physical cores, 32 logical processors, using up to 32 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x974a7fba\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 1e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [0e+00, 0e+00]\n", + " RHS range [2e+08, 2e+08]\n", + "\n", + "User MIP start produced solution with objective 9.86729e+09 (0.00s)\n", + "User MIP start produced solution with objective 9.86675e+09 (0.00s)\n", + "User MIP start produced solution with objective 9.86654e+09 (0.01s)\n", + "User MIP start produced solution with objective 9.8661e+09 (0.01s)\n", + "Loaded user MIP start with objective 9.8661e+09\n", + "\n", + "Presolve time: 0.00s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "\n", + "Root relaxation: objective 9.865344e+09, 510 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 9.8653e+09 0 1 9.8661e+09 9.8653e+09 0.01% - 0s\n", + "\n", + "Explored 1 nodes (510 simplex iterations) in 0.02 seconds (0.01 work units)\n", + "Thread count was 32 (of 32 available processors)\n", + "\n", + "Solution count 4: 9.8661e+09 9.86654e+09 9.86675e+09 9.86729e+09 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 9.866096485614e+09, best bound 9.865343669936e+09, gap 0.0076%\n", + "\n", + "User-callback calls 182, time in user-callback 0.00 sec\n", + "objective_value(model.inner) = 9.866096485613789e9\n" + ] + } + ], + "source": [ + "data = random_uc_data(samples=1, n=500)[1]\n", + "model = build_uc_model(data)\n", + "solver_ml.optimize(model)\n", + "@show objective_value(model.inner);" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Julia 1.9.0", + "language": "julia", + "name": "julia-1.9" + }, + "language_info": { + "file_extension": ".jl", + "mimetype": "application/julia", + "name": "julia", + "version": "1.9.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/0.3/_sources/tutorials/getting-started-pyomo.ipynb.txt b/0.3/_sources/tutorials/getting-started-pyomo.ipynb.txt index 55c5830..c3b46a7 100644 --- a/0.3/_sources/tutorials/getting-started-pyomo.ipynb.txt +++ b/0.3/_sources/tutorials/getting-started-pyomo.ipynb.txt @@ -11,17 +11,17 @@ "\n", "## Introduction\n", "\n", - "**MIPLearn** is an open source framework that uses machine learning (ML) to accelerate the performance of both commercial and open source mixed-integer programming solvers (e.g. Gurobi, CPLEX, XPRESS, Cbc or SCIP). 
In this tutorial, we will:\n", + "**MIPLearn** is an open source framework that uses machine learning (ML) to accelerate the performance of mixed-integer programming solvers (e.g. Gurobi, CPLEX, XPRESS). In this tutorial, we will:\n", "\n", "1. Install the Python/Pyomo version of MIPLearn\n", - "2. Model a simple optimization problem using JuMP\n", + "2. Model a simple optimization problem using Pyomo\n", "3. Generate training data and train the ML models\n", "4. Use the ML models together Gurobi to solve new instances\n", "\n", "
\n", "Note\n", " \n", - "The Python/Pyomo version of MIPLearn is currently only compatible with with Gurobi, CPLEX and XPRESS. For broader solver compatibility, see the Julia/JuMP version of the package.\n", + "The Python/Pyomo version of MIPLearn is currently only compatible with Pyomo persistent solvers (Gurobi, CPLEX and XPRESS). For broader solver compatibility, see the Julia/JuMP version of the package.\n", "
\n", "\n", "
\n", @@ -41,7 +41,7 @@ "\n", "MIPLearn is available in two versions:\n", "\n", - "- Python version, compatible with the Pyomo modeling language,\n", + "- Python version, compatible with the Pyomo and Gurobipy modeling languages,\n", "- Julia version, compatible with the JuMP modeling language.\n", "\n", "In this tutorial, we will demonstrate how to use and install the Python/Pyomo version of the package. The first step is to install Python 3.8+ in your computer. See the [official Python website for more instructions](https://www.python.org/downloads/). After Python is installed, we proceed to install MIPLearn using `pip`:" @@ -51,10 +51,15 @@ "cell_type": "code", "execution_count": 1, "id": "cd8a69c1", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T19:57:33.202580815Z", + "start_time": "2023-06-06T19:57:33.198341886Z" + } + }, "outputs": [], "source": [ - "# !pip install MIPLearn==0.2.0.dev13" + "# !pip install MIPLearn==0.3.0" ] }, { @@ -62,26 +67,30 @@ "id": "e8274543", "metadata": {}, "source": [ - "In addition to MIPLearn itself, we will also install Gurobi 9.5, a state-of-the-art commercial MILP solver. This step also install a demo license for Gurobi, which should able to solve the small optimization problems in this tutorial. A paid license is required for solving large-scale problems." + "In addition to MIPLearn itself, we will also install Gurobi 10.0, a state-of-the-art commercial MILP solver. This step also install a demo license for Gurobi, which should able to solve the small optimization problems in this tutorial. A license is required for solving larger-scale problems." ] }, { "cell_type": "code", "execution_count": 2, "id": "dcc8756c", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T19:57:35.756831801Z", + "start_time": "2023-06-06T19:57:33.201767088Z" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Looking in indexes: https://pypi.gurobi.com\n", - "Requirement already satisfied: gurobipy<9.6,>=9.5 in /opt/anaconda3/envs/miplearn/lib/python3.8/site-packages (9.5.1)\n" + "Requirement already satisfied: gurobipy<10.1,>=10 in /home/axavier/Software/anaconda3/envs/miplearn/lib/python3.8/site-packages (10.0.1)\n" ] } ], "source": [ - "!pip install --upgrade -i https://pypi.gurobi.com 'gurobipy>=9.5,<9.6'" + "!pip install 'gurobipy>=10,<10.1'" ] }, { @@ -107,10 +116,16 @@ "\n", "To illustrate how can MIPLearn be used, we will model and solve a small optimization problem related to power systems optimization. The problem we discuss below is a simplification of the **unit commitment problem,** a practical optimization problem solved daily by electric grid operators around the world. \n", "\n", - "Suppose that you work at a utility company, and that it is your job to decide which electrical generators should be online at a certain hour of the day, as well as how much power should each generator produce. More specifically, assume that your company owns $n$ generators, denoted by $g_1, \\ldots, g_n$. Each generator can either be online or offline. An online generator $g_i$ can produce between $p^\\text{min}_i$ to $p^\\text{max}_i$ megawatts of power, and it costs your company $c^\\text{fix}_i + c^\\text{var}_i y_i$, where $y_i$ is the amount of power produced. An offline generator produces nothing and costs nothing. You also know that the total amount of power to be produced needs to be exactly equal to the total demand $d$ (in megawatts). 
To minimize the costs to your company, which generators should be online, and how much power should they produce?\n", - "\n", - "This simple problem can be modeled as a *mixed-integer linear optimization* problem as follows. For each generator $g_i$, let $x_i \\in \\{0,1\\}$ be a decision variable indicating whether $g_i$ is online, and let $y_i \\geq 0$ be a decision variable indicating how much power does $g_i$ produce. The problem is then given by:\n", + "Suppose that a utility company needs to decide which electrical generators should be online at each hour of the day, as well as how much power should each generator produce. More specifically, assume that the company owns $n$ generators, denoted by $g_1, \\ldots, g_n$. Each generator can either be online or offline. An online generator $g_i$ can produce between $p^\\text{min}_i$ to $p^\\text{max}_i$ megawatts of power, and it costs the company $c^\\text{fix}_i + c^\\text{var}_i y_i$, where $y_i$ is the amount of power produced. An offline generator produces nothing and costs nothing. The total amount of power to be produced needs to be exactly equal to the total demand $d$ (in megawatts).\n", "\n", + "This simple problem can be modeled as a *mixed-integer linear optimization* problem as follows. For each generator $g_i$, let $x_i \\in \\{0,1\\}$ be a decision variable indicating whether $g_i$ is online, and let $y_i \\geq 0$ be a decision variable indicating how much power does $g_i$ produce. The problem is then given by:" + ] + }, + { + "cell_type": "markdown", + "id": "f12c3702", + "metadata": {}, + "source": [ "$$\n", "\\begin{align}\n", "\\text{minimize } \\quad & \\sum_{i=1}^n \\left( c^\\text{fix}_i x_i + c^\\text{var}_i y_i \\right) \\\\\n", @@ -120,16 +135,28 @@ "& x_i \\in \\{0,1\\} & i=1,\\ldots,n \\\\\n", "& y_i \\geq 0 & i=1,\\ldots,n\n", "\\end{align}\n", - "$$\n", - "\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "id": "be3989ed", + "metadata": {}, + "source": [ "
\n", - " \n", + "\n", "Note\n", - " \n", - "We use a simplified version of the unit commitment problem in this tutorial just to make it easier to follow. MIPLearn can also handle realistic, large-scale versions of this problem. See benchmarks for more details.\n", - " \n", - "
\n", "\n", + "We use a simplified version of the unit commitment problem in this tutorial just to make it easier to follow. MIPLearn can also handle realistic, large-scale versions of this problem.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "a5fd33f6", + "metadata": {}, + "source": [ "Next, let us convert this abstract mathematical formulation into a concrete optimization model, using Python and Pyomo. We start by defining a data class `UnitCommitmentData`, which holds all the input data." ] }, @@ -138,20 +165,27 @@ "execution_count": 3, "id": "22a67170-10b4-43d3-8708-014d91141e73", "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:00:03.278853343Z", + "start_time": "2023-06-06T20:00:03.123324067Z" + }, "tags": [] }, "outputs": [], "source": [ "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", "import numpy as np\n", "\n", + "\n", "@dataclass\n", "class UnitCommitmentData:\n", " demand: float\n", - " pmin: np.ndarray\n", - " pmax: np.ndarray\n", - " cfix: np.ndarray\n", - " cvar: np.ndarray" + " pmin: List[float]\n", + " pmax: List[float]\n", + " cfix: List[float]\n", + " cvar: List[float]" ] }, { @@ -159,28 +193,38 @@ "id": "29f55efa-0751-465a-9b0a-a821d46a3d40", "metadata": {}, "source": [ - "Next, we write a `build_uc_model` function, which converts the input data into a concrete Pyomo model." + "Next, we write a `build_uc_model` function, which converts the input data into a concrete Pyomo model. The function accepts `UnitCommitmentData`, the data structure we previously defined, or the path to a compressed pickle file containing this data." ] }, { "cell_type": "code", "execution_count": 4, "id": "2f67032f-0d74-4317-b45c-19da0ec859e9", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:00:45.890126754Z", + "start_time": "2023-06-06T20:00:45.637044282Z" + } + }, "outputs": [], "source": [ "import pyomo.environ as pe\n", + "from typing import Union\n", + "from miplearn.io import read_pkl_gz\n", + "from miplearn.solvers.pyomo import PyomoModel\n", + "\n", + "\n", + "def build_uc_model(data: Union[str, UnitCommitmentData]) -> PyomoModel:\n", + " if isinstance(data, str):\n", + " data = read_pkl_gz(data)\n", "\n", - "def build_uc_model(data: UnitCommitmentData) -> pe.ConcreteModel:\n", " model = pe.ConcreteModel()\n", " n = len(data.pmin)\n", " model.x = pe.Var(range(n), domain=pe.Binary)\n", " model.y = pe.Var(range(n), domain=pe.NonNegativeReals)\n", " model.obj = pe.Objective(\n", " expr=sum(\n", - " data.cfix[i] * model.x[i] +\n", - " data.cvar[i] * model.y[i]\n", - " for i in range(n)\n", + " data.cfix[i] * model.x[i] + data.cvar[i] * model.y[i] for i in range(n)\n", " )\n", " )\n", " model.eq_max_power = pe.ConstraintList()\n", @@ -191,7 +235,7 @@ " model.eq_demand = pe.Constraint(\n", " expr=sum(model.y[i] for i in range(n)) == data.demand,\n", " )\n", - " return model" + " return PyomoModel(model, \"gurobi_persistent\")" ] }, { @@ -206,15 +250,56 @@ "cell_type": "code", "execution_count": 5, "id": "2a896f47", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:01:10.993801745Z", + "start_time": "2023-06-06T20:01:10.887580927Z" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Set parameter Threads to value 1\n", - "Set parameter Seed to value 42\n", - "Restricted license - for non-production use only - expires 2023-10-25\n", + "Restricted license - for non-production use only - expires 2024-10-28\n", + "Set parameter QCPDual to value 1\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 
logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 7 rows, 6 columns and 15 nonzeros\n", + "Model fingerprint: 0x15c7a953\n", + "Variable types: 3 continuous, 3 integer (3 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 7e+01]\n", + " Objective range [2e+00, 7e+02]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [1e+02, 1e+02]\n", + "Presolve removed 2 rows and 1 columns\n", + "Presolve time: 0.00s\n", + "Presolved: 5 rows, 5 columns, 13 nonzeros\n", + "Variable types: 0 continuous, 5 integer (3 binary)\n", + "Found heuristic solution: objective 1400.0000000\n", + "\n", + "Root relaxation: objective 1.035000e+03, 3 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 1035.00000 0 1 1400.00000 1035.00000 26.1% - 0s\n", + " 0 0 1105.71429 0 1 1400.00000 1105.71429 21.0% - 0s\n", + "* 0 0 0 1320.0000000 1320.00000 0.00% - 0s\n", + "\n", + "Explored 1 nodes (5 simplex iterations) in 0.01 seconds (0.00 work units)\n", + "Thread count was 12 (of 12 available processors)\n", + "\n", + "Solution count 2: 1320 1400 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 1.320000000000e+03, best bound 1.320000000000e+03, gap 0.0000%\n", + "WARNING: Cannot get reduced costs for MIP.\n", + "WARNING: Cannot get duals for MIP.\n", "obj = 1320.0\n", "x = [-0.0, 1.0, 1.0]\n", "y = [0.0, 60.0, 40.0]\n" @@ -224,20 +309,18 @@ "source": [ "model = build_uc_model(\n", " UnitCommitmentData(\n", - " demand = 100.0,\n", - " pmin = [10, 20, 30],\n", - " pmax = [50, 60, 70],\n", - " cfix = [700, 600, 500],\n", - " cvar = [1.5, 2.0, 2.5],\n", + " demand=100.0,\n", + " pmin=[10, 20, 30],\n", + " pmax=[50, 60, 70],\n", + " cfix=[700, 600, 500],\n", + " cvar=[1.5, 2.0, 2.5],\n", " )\n", ")\n", "\n", - "solver = pe.SolverFactory(\"gurobi_persistent\")\n", - "solver.set_instance(model)\n", - "solver.solve()\n", - "print(\"obj =\", model.obj())\n", - "print(\"x =\", [model.x[i].value for i in range(3)])\n", - "print(\"y =\", [model.y[i].value for i in range(3)])" + "model.optimize()\n", + "print(\"obj =\", model.inner.obj())\n", + "print(\"x =\", [model.inner.x[i].value for i in range(3)])\n", + "print(\"y =\", [model.inner.y[i].value for i in range(3)])" ] }, { @@ -248,6 +331,20 @@ "Running the code above, we found that the optimal solution for our small problem instance costs \\$1320. It is achieve by keeping generators 2 and 3 online and producing, respectively, 60 MW and 40 MW of power." ] }, + { + "cell_type": "markdown", + "id": "01f576e1-1790-425e-9e5c-9fa07b6f4c26", + "metadata": {}, + "source": [ + "
\n",
+    " \n",
+    "Notes\n",
+    " \n",
+    "- In the example above, `PyomoModel` is just a thin wrapper around a standard Pyomo model. This wrapper is what allows MIPLearn to remain solver- and modeling-language-agnostic, and it provides only a few basic methods, such as `optimize`. For more control, and to query the solution, the original Pyomo model can be accessed through `model.inner`, as illustrated above.\n",
+    "- To use CPLEX or XPRESS instead of Gurobi, replace `gurobi_persistent` with `cplex_persistent` or `xpress_persistent` in `build_uc_model`. Note that only persistent Pyomo solvers are currently supported; pull requests adding support for other types of solvers are very welcome.\n",
+    "
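For readers who want to try the second note, a minimal sketch is shown below (not part of the original notebook diff). It assumes the corresponding Pyomo persistent plugin, for example `cplex_persistent`, is installed in the environment; everything else in the tutorial stays unchanged, since only the solver name passed to `PyomoModel` differs.

```python
# Sketch only: wrap an existing Pyomo model with a non-Gurobi persistent solver.
# Assumes the cplex_persistent (or xpress_persistent) Pyomo plugin is installed.
import pyomo.environ as pe
from miplearn.solvers.pyomo import PyomoModel


def wrap_model(model: pe.ConcreteModel, solver: str = "cplex_persistent") -> PyomoModel:
    # Non-persistent Pyomo solvers are not currently supported by MIPLearn.
    return PyomoModel(model, solver)
```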
" + ] + }, { "cell_type": "markdown", "id": "cf60c1dd", @@ -255,7 +352,7 @@ "source": [ "## Generating training data\n", "\n", - "Although Gurobi could solve the small example above in a fraction of a second, it gets slower for larger and more complex versions of the problem. If this is a problem that needs to be solved frequently, as it is often the case in practice, it could make sense to spend some time upfront generating a **trained** version of Gurobi, which can solve new instances (similar to the ones it was trained on) faster.\n", + "Although Gurobi could solve the small example above in a fraction of a second, it gets slower for larger and more complex versions of the problem. If this is a problem that needs to be solved frequently, as it is often the case in practice, it could make sense to spend some time upfront generating a **trained** solver, which can optimize new instances (similar to the ones it was trained on) faster.\n", "\n", "In the following, we will use MIPLearn to train machine learning models that is able to predict the optimal solution for instances that follow a given probability distribution, then it will provide this predicted solution to Gurobi as a warm start. Before we can train the model, we need to collect training data by solving a large number of instances. In real-world situations, we may construct these training instances based on historical data. In this tutorial, we will construct them using a random instance generator:" ] @@ -264,13 +361,19 @@ "cell_type": "code", "execution_count": 6, "id": "5eb09fab", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:02:27.324208900Z", + "start_time": "2023-06-06T20:02:26.990044230Z" + } + }, "outputs": [], "source": [ "from scipy.stats import uniform\n", "from typing import List\n", "import random\n", "\n", + "\n", "def random_uc_data(samples: int, n: int, seed: int = 42) -> List[UnitCommitmentData]:\n", " random.seed(seed)\n", " np.random.seed(seed)\n", @@ -280,13 +383,13 @@ " cvar = uniform(loc=1.25, scale=0.25).rvs(n)\n", " return [\n", " UnitCommitmentData(\n", - " demand = pmax.sum() * uniform(loc=0.5, scale=0.25).rvs(),\n", - " pmin = pmin,\n", - " pmax = pmax,\n", - " cfix = cfix,\n", - " cvar = cvar,\n", + " demand=pmax.sum() * uniform(loc=0.5, scale=0.25).rvs(),\n", + " pmin=pmin,\n", + " pmax=pmax,\n", + " cfix=cfix,\n", + " cvar=cvar,\n", " )\n", - " for i in range(samples)\n", + " for _ in range(samples)\n", " ]" ] }, @@ -297,20 +400,26 @@ "source": [ "In this example, for simplicity, only the demands change from one instance to the next. We could also have randomized the costs, production limits or even the number of units. The more randomization we have in the training data, however, the more challenging it is for the machine learning models to learn solution patterns.\n", "\n", - "Now we generate 500 instances of this problem, each one with 50 generators, and we use 450 of these instances for training. After generating the instances, we write them to individual files. MIPLearn uses files during the training process because, for large-scale optimization problems, it is often impractical to hold in memory the entire training data, as well as the concrete Pyomo models. Files also make it much easier to solve multiple instances simultaneously, potentially even on multiple machines. We will cover parallel and distributed computing in a future tutorial. 
The code below generates the files `uc/train/00000.pkl.gz`, `uc/train/00001.pkl.gz`, etc., which contain the input data in compressed (gzipped) pickle format." + "Now we generate 500 instances of this problem, each one with 50 generators, and we use 450 of these instances for training. After generating the instances, we write them to individual files. MIPLearn uses files during the training process because, for large-scale optimization problems, it is often impractical to hold in memory the entire training data, as well as the concrete Pyomo models. Files also make it much easier to solve multiple instances simultaneously, potentially on multiple machines. The code below generates the files `uc/train/00000.pkl.gz`, `uc/train/00001.pkl.gz`, etc., which contain the input data in compressed (gzipped) pickle format." ] }, { "cell_type": "code", "execution_count": 7, "id": "6156752c", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:03:04.782830561Z", + "start_time": "2023-06-06T20:03:04.530421396Z" + } + }, "outputs": [], "source": [ - "from miplearn import save\n", - "data = random_uc_data(samples=500, n=50)\n", - "train_files = save(data[0:450], \"uc/train/\")\n", - "test_files = save(data[450:500], \"uc/test/\")" + "from miplearn.io import write_pkl_gz\n", + "\n", + "data = random_uc_data(samples=500, n=500)\n", + "train_data = write_pkl_gz(data[0:450], \"uc/train\")\n", + "test_data = write_pkl_gz(data[450:500], \"uc/test\")" ] }, { @@ -318,115 +427,180 @@ "id": "b17af877", "metadata": {}, "source": [ - "Finally, we use `LearningSolver` to solve all the training instances. `LearningSolver` is the main component provided by MIPLearn, which integrates MIP solvers and ML. The optimal solutions, along with other useful training data, are stored in HDF5 files `uc/train/00000.h5`, `uc/train/00001.h5`, etc." + "Finally, we use `BasicCollector` to collect the optimal solutions and other useful training data for all training instances. The data is stored in HDF5 files `uc/train/00000.h5`, `uc/train/00001.h5`, etc. The optimization models are also exported to compressed MPS files `uc/train/00000.mps.gz`, `uc/train/00001.mps.gz`, etc." ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 8, "id": "7623f002", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:03:35.571497019Z", + "start_time": "2023-06-06T20:03:25.804104036Z" + } + }, "outputs": [], "source": [ - "from miplearn import LearningSolver\n", - "solver = LearningSolver()\n", - "solver.solve(train_files, build_uc_model);" + "from miplearn.collectors.basic import BasicCollector\n", + "\n", + "bc = BasicCollector()\n", + "bc.collect(train_data, build_uc_model, n_jobs=4)" ] }, { "cell_type": "markdown", - "id": "2f24ee83", + "id": "c42b1be1-9723-4827-82d8-974afa51ef9f", "metadata": {}, "source": [ - "## Solving test instances\n", + "## Training and solving test instances" + ] + }, + { + "cell_type": "markdown", + "id": "a33c6aa4-f0b8-4ccb-9935-01f7d7de2a1c", + "metadata": {}, + "source": [ + "With training data in hand, we can now design and train a machine learning model to accelerate solver performance. In this tutorial, for illustration purposes, we will use ML to generate a good warm start using $k$-nearest neighbors. More specifically, the strategy is to:\n", + "\n", + "1. Memorize the optimal solutions of all training instances;\n", + "2. Given a test instance, find the 25 most similar training instances, based on constraint right-hand sides;\n", + "3. 
Merge their optimal solutions into a single partial solution; specifically, only assign values to the binary variables that agree unanimously.\n", + "4. Provide this partial solution to the solver as a warm start.\n", "\n", - "With training data in hand, we can now fit the ML models, using the `LearningSolver.fit` method, then solve the test instances with `LearningSolver.solve`, as shown below. The `tee=True` parameter asks MIPLearn to print the solver log to the screen." + "This simple strategy can be implemented as shown below, using `MemorizingPrimalComponent`. For more advanced strategies, and for the usage of more advanced classifiers, see the user guide." ] }, { "cell_type": "code", "execution_count": 9, - "id": "c8385030", + "id": "435f7bf8-4b09-4889-b1ec-b7b56e7d8ed2", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:05:20.497772794Z", + "start_time": "2023-06-06T20:05:20.484821405Z" + } + }, + "outputs": [], + "source": [ + "from sklearn.neighbors import KNeighborsClassifier\n", + "from miplearn.components.primal.actions import SetWarmStart\n", + "from miplearn.components.primal.mem import (\n", + " MemorizingPrimalComponent,\n", + " MergeTopSolutions,\n", + ")\n", + "from miplearn.extractors.fields import H5FieldsExtractor\n", + "\n", + "comp = MemorizingPrimalComponent(\n", + " clf=KNeighborsClassifier(n_neighbors=25),\n", + " extractor=H5FieldsExtractor(\n", + " instance_fields=[\"static_constr_rhs\"],\n", + " ),\n", + " constructor=MergeTopSolutions(25, [0.0, 1.0]),\n", + " action=SetWarmStart(),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "9536e7e4-0b0d-49b0-bebd-4a848f839e94", "metadata": {}, + "source": [ + "Having defined the ML strategy, we next construct `LearningSolver`, train the ML component and optimize one of the test instances." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "9d13dd50-3dcf-4673-a757-6f44dcc0dedf", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:05:22.672002339Z", + "start_time": "2023-06-06T20:05:21.447466634Z" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Set parameter LogFile to value \"/tmp/tmpvbaqbyty.log\"\n", "Set parameter QCPDual to value 1\n", - "Gurobi Optimizer version 9.5.1 build v9.5.1rc2 (linux64)\n", - "Thread count: 16 physical cores, 32 logical processors, using up to 1 threads\n", - "Optimize a model with 101 rows, 100 columns and 250 nonzeros\n", - "Model fingerprint: 0x8de73876\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x5e67c6ee\n", "Coefficient statistics:\n", " Matrix range [1e+00, 2e+06]\n", " Objective range [1e+00, 6e+07]\n", " Bounds range [1e+00, 1e+00]\n", - " RHS range [2e+07, 2e+07]\n", - "Presolve removed 100 rows and 50 columns\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve removed 1000 rows and 500 columns\n", "Presolve time: 0.00s\n", - "Presolved: 1 rows, 50 columns, 50 nonzeros\n", + "Presolved: 1 rows, 500 columns, 500 nonzeros\n", "\n", "Iteration Objective Primal Inf. Dual Inf. 
Time\n", - " 0 5.7349081e+08 1.044003e+04 0.000000e+00 0s\n", - " 1 6.8268465e+08 0.000000e+00 0.000000e+00 0s\n", - "\n", - "Solved in 1 iterations and 0.00 seconds (0.00 work units)\n", - "Optimal objective 6.826846503e+08\n", - "Set parameter LogFile to value \"\"\n", - "Set parameter LogFile to value \"/tmp/tmp48j6n35b.log\"\n", - "Gurobi Optimizer version 9.5.1 build v9.5.1rc2 (linux64)\n", - "Thread count: 16 physical cores, 32 logical processors, using up to 1 threads\n", - "Optimize a model with 101 rows, 100 columns and 250 nonzeros\n", - "Model fingerprint: 0x200d64ba\n", - "Variable types: 50 continuous, 50 integer (50 binary)\n", + " 0 6.6166537e+09 5.648803e+04 0.000000e+00 0s\n", + " 1 8.2906219e+09 0.000000e+00 0.000000e+00 0s\n", + "\n", + "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n", + "Optimal objective 8.290621916e+09\n", + "Set parameter QCPDual to value 1\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0xa4a7961e\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", "Coefficient statistics:\n", " Matrix range [1e+00, 2e+06]\n", " Objective range [1e+00, 6e+07]\n", " Bounds range [1e+00, 1e+00]\n", - " RHS range [2e+07, 2e+07]\n", + " RHS range [3e+08, 3e+08]\n", "\n", - "User MIP start produced solution with objective 6.84841e+08 (0.00s)\n", - "Loaded user MIP start with objective 6.84841e+08\n", + "User MIP start produced solution with objective 8.30129e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.29184e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.29146e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.29146e+09 (0.02s)\n", + "Loaded user MIP start with objective 8.29146e+09\n", "\n", - "Presolve time: 0.00s\n", - "Presolved: 101 rows, 100 columns, 250 nonzeros\n", - "Variable types: 50 continuous, 50 integer (50 binary)\n", + "Presolve time: 0.01s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", "\n", - "Root relaxation: objective 6.826847e+08, 56 iterations, 0.00 seconds (0.00 work units)\n", + "Root relaxation: objective 8.290622e+09, 512 iterations, 0.01 seconds (0.00 work units)\n", "\n", " Nodes | Current Node | Objective Bounds | Work\n", " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", "\n", - " 0 0 6.8268e+08 0 1 6.8484e+08 6.8268e+08 0.31% - 0s\n", - " 0 0 6.8315e+08 0 3 6.8484e+08 6.8315e+08 0.25% - 0s\n", - " 0 0 6.8315e+08 0 1 6.8484e+08 6.8315e+08 0.25% - 0s\n", - " 0 0 6.8315e+08 0 3 6.8484e+08 6.8315e+08 0.25% - 0s\n", - " 0 0 6.8315e+08 0 4 6.8484e+08 6.8315e+08 0.25% - 0s\n", - " 0 0 6.8315e+08 0 4 6.8484e+08 6.8315e+08 0.25% - 0s\n", - " 0 2 6.8327e+08 0 4 6.8484e+08 6.8327e+08 0.23% - 0s\n", + " 0 0 8.2906e+09 0 1 8.2915e+09 8.2906e+09 0.01% - 0s\n", "\n", "Cutting planes:\n", - " Flow cover: 3\n", + " Cover: 1\n", + " Flow cover: 2\n", "\n", - "Explored 32 nodes (155 simplex iterations) in 0.02 seconds (0.00 work units)\n", - "Thread count was 1 (of 32 available processors)\n", + "Explored 1 nodes (512 simplex iterations) in 0.09 seconds (0.01 work units)\n", + "Thread count was 12 (of 12 available processors)\n", "\n", - "Solution count 1: 
6.84841e+08 \n", + "Solution count 3: 8.29146e+09 8.29184e+09 8.30129e+09 \n", "\n", "Optimal solution found (tolerance 1.00e-04)\n", - "Best objective 6.848411655488e+08, best bound 6.848411655488e+08, gap 0.0000%\n", - "Set parameter LogFile to value \"\"\n", + "Best objective 8.291459497797e+09, best bound 8.290645029670e+09, gap 0.0098%\n", "WARNING: Cannot get reduced costs for MIP.\n", "WARNING: Cannot get duals for MIP.\n" ] } ], "source": [ - "solver_ml = LearningSolver()\n", - "solver_ml.fit(train_files, build_uc_model)\n", - "solver_ml.solve(test_files[0:1], build_uc_model, tee=True);" + "from miplearn.solvers.learning import LearningSolver\n", + "\n", + "solver_ml = LearningSolver(components=[comp])\n", + "solver_ml.fit(train_data)\n", + "solver_ml.optimize(test_data[0], build_uc_model);" ] }, { @@ -434,100 +608,105 @@ "id": "61da6dad-7f56-4edb-aa26-c00eb5f946c0", "metadata": {}, "source": [ - "By examining the solve log above, specifically the line `Loaded user MIP start with objective...`, we can see that MIPLearn was able to construct an initial solution which turned out to be the optimal solution to the problem. Now let us repeat the code above, but using an untrained solver. Note that the `fit` line is omitted." + "By examining the solve log above, specifically the line `Loaded user MIP start with objective...`, we can see that MIPLearn was able to construct an initial solution which turned out to be very close to the optimal solution to the problem. Now let us repeat the code above, but a solver which does not apply any ML strategies. Note that our previously-defined component is not provided." ] }, { "cell_type": "code", - "execution_count": 10, - "id": "33d15d6c-6db4-477f-bd4b-fe8e84e5f023", - "metadata": {}, + "execution_count": 11, + "id": "2ff391ed-e855-4228-aa09-a7641d8c2893", + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:05:46.969575966Z", + "start_time": "2023-06-06T20:05:46.420803286Z" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Set parameter LogFile to value \"/tmp/tmp3uhhdurw.log\"\n", "Set parameter QCPDual to value 1\n", - "Gurobi Optimizer version 9.5.1 build v9.5.1rc2 (linux64)\n", - "Thread count: 16 physical cores, 32 logical processors, using up to 1 threads\n", - "Optimize a model with 101 rows, 100 columns and 250 nonzeros\n", - "Model fingerprint: 0x8de73876\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x5e67c6ee\n", "Coefficient statistics:\n", " Matrix range [1e+00, 2e+06]\n", " Objective range [1e+00, 6e+07]\n", " Bounds range [1e+00, 1e+00]\n", - " RHS range [2e+07, 2e+07]\n", - "Presolve removed 100 rows and 50 columns\n", - "Presolve time: 0.00s\n", - "Presolved: 1 rows, 50 columns, 50 nonzeros\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve removed 1000 rows and 500 columns\n", + "Presolve time: 0.01s\n", + "Presolved: 1 rows, 500 columns, 500 nonzeros\n", "\n", "Iteration Objective Primal Inf. Dual Inf. 
Time\n", - " 0 5.7349081e+08 1.044003e+04 0.000000e+00 0s\n", - " 1 6.8268465e+08 0.000000e+00 0.000000e+00 0s\n", + " 0 6.6166537e+09 5.648803e+04 0.000000e+00 0s\n", + " 1 8.2906219e+09 0.000000e+00 0.000000e+00 0s\n", "\n", "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n", - "Optimal objective 6.826846503e+08\n", - "Set parameter LogFile to value \"\"\n", - "Set parameter LogFile to value \"/tmp/tmp18aqg2ic.log\"\n", - "Gurobi Optimizer version 9.5.1 build v9.5.1rc2 (linux64)\n", - "Thread count: 16 physical cores, 32 logical processors, using up to 1 threads\n", - "Optimize a model with 101 rows, 100 columns and 250 nonzeros\n", - "Model fingerprint: 0xb90d1075\n", - "Variable types: 50 continuous, 50 integer (50 binary)\n", + "Optimal objective 8.290621916e+09\n", + "Set parameter QCPDual to value 1\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x8a0f9587\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", "Coefficient statistics:\n", " Matrix range [1e+00, 2e+06]\n", " Objective range [1e+00, 6e+07]\n", " Bounds range [1e+00, 1e+00]\n", - " RHS range [2e+07, 2e+07]\n", - "Found heuristic solution: objective 8.056576e+08\n", + " RHS range [3e+08, 3e+08]\n", "Presolve time: 0.00s\n", - "Presolved: 101 rows, 100 columns, 250 nonzeros\n", - "Variable types: 50 continuous, 50 integer (50 binary)\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Found heuristic solution: objective 9.757128e+09\n", "\n", - "Root relaxation: objective 6.826847e+08, 56 iterations, 0.00 seconds (0.00 work units)\n", + "Root relaxation: objective 8.290622e+09, 512 iterations, 0.00 seconds (0.00 work units)\n", "\n", " Nodes | Current Node | Objective Bounds | Work\n", " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", "\n", - " 0 0 6.8268e+08 0 1 8.0566e+08 6.8268e+08 15.3% - 0s\n", - "H 0 0 7.099498e+08 6.8268e+08 3.84% - 0s\n", - " 0 0 6.8315e+08 0 3 7.0995e+08 6.8315e+08 3.78% - 0s\n", - "H 0 0 6.883227e+08 6.8315e+08 0.75% - 0s\n", - " 0 0 6.8352e+08 0 4 6.8832e+08 6.8352e+08 0.70% - 0s\n", - " 0 0 6.8352e+08 0 4 6.8832e+08 6.8352e+08 0.70% - 0s\n", - " 0 0 6.8352e+08 0 1 6.8832e+08 6.8352e+08 0.70% - 0s\n", - "H 0 0 6.862582e+08 6.8352e+08 0.40% - 0s\n", - " 0 0 6.8352e+08 0 4 6.8626e+08 6.8352e+08 0.40% - 0s\n", - " 0 0 6.8352e+08 0 4 6.8626e+08 6.8352e+08 0.40% - 0s\n", - " 0 0 6.8352e+08 0 1 6.8626e+08 6.8352e+08 0.40% - 0s\n", - " 0 0 6.8352e+08 0 3 6.8626e+08 6.8352e+08 0.40% - 0s\n", - " 0 0 6.8352e+08 0 4 6.8626e+08 6.8352e+08 0.40% - 0s\n", - " 0 0 6.8352e+08 0 4 6.8626e+08 6.8352e+08 0.40% - 0s\n", - " 0 2 6.8354e+08 0 4 6.8626e+08 6.8354e+08 0.40% - 0s\n", - "* 18 5 6 6.849018e+08 6.8413e+08 0.11% 3.1 0s\n", - "H 24 1 6.848412e+08 6.8426e+08 0.09% 3.2 0s\n", + " 0 0 8.2906e+09 0 1 9.7571e+09 8.2906e+09 15.0% - 0s\n", + "H 0 0 8.298273e+09 8.2906e+09 0.09% - 0s\n", + " 0 0 8.2907e+09 0 4 8.2983e+09 8.2907e+09 0.09% - 0s\n", + " 0 0 8.2907e+09 0 1 8.2983e+09 8.2907e+09 0.09% - 0s\n", + " 0 0 8.2907e+09 0 4 8.2983e+09 8.2907e+09 0.09% - 0s\n", + "H 0 0 8.293980e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2907e+09 0 5 8.2940e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 
8.2907e+09 0 1 8.2940e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2907e+09 0 2 8.2940e+09 8.2907e+09 0.04% - 0s\n", + " 0 0 8.2908e+09 0 1 8.2940e+09 8.2908e+09 0.04% - 0s\n", + " 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n", + " 0 0 8.2908e+09 0 4 8.2940e+09 8.2908e+09 0.04% - 0s\n", + "H 0 0 8.291465e+09 8.2908e+09 0.01% - 0s\n", "\n", "Cutting planes:\n", - " Gomory: 1\n", - " Flow cover: 2\n", + " Gomory: 2\n", + " MIR: 1\n", "\n", - "Explored 30 nodes (217 simplex iterations) in 0.02 seconds (0.00 work units)\n", - "Thread count was 1 (of 32 available processors)\n", + "Explored 1 nodes (1025 simplex iterations) in 0.08 seconds (0.03 work units)\n", + "Thread count was 12 (of 12 available processors)\n", "\n", - "Solution count 6: 6.84841e+08 6.84902e+08 6.86258e+08 ... 8.05658e+08\n", + "Solution count 4: 8.29147e+09 8.29398e+09 8.29827e+09 9.75713e+09 \n", "\n", "Optimal solution found (tolerance 1.00e-04)\n", - "Best objective 6.848411655488e+08, best bound 6.848411655488e+08, gap 0.0000%\n", - "Set parameter LogFile to value \"\"\n", + "Best objective 8.291465302389e+09, best bound 8.290781665333e+09, gap 0.0082%\n", "WARNING: Cannot get reduced costs for MIP.\n", "WARNING: Cannot get duals for MIP.\n" ] } ], "source": [ - "solver_baseline = LearningSolver()\n", - "solver_baseline.solve(test_files[0:1], build_uc_model, tee=True);" + "solver_baseline = LearningSolver(components=[])\n", + "solver_baseline.fit(train_data)\n", + "solver_baseline.optimize(test_data[0], build_uc_model);" ] }, { @@ -535,19 +714,7 @@ "id": "b6d37b88-9fcc-43ee-ac1e-2a7b1e51a266", "metadata": {}, "source": [ - "In the log above, the `MIP start` line is missing, and Gurobi had to start with a significantly inferior initial solution. The solver was still able to find the optimal solution at the end, but it required using its own internal heuristic procedures. In this example, because we solve very small optimization problems, there was almost no difference in terms of running time. For larger problems, however, the difference can be significant. See benchmarks for more details.\n", - "\n", - "
\n", - "Note\n", - " \n", - "In addition to partial initial solutions, MIPLearn is also able to predict lazy constraints, cutting planes and branching priorities. See the next tutorials for more details.\n", - "
\n", - "\n", - "
\n", - "Note\n", - " \n", - "It is not necessary to specify what ML models to use. MIPLearn, by default, will try a number of classical ML models and will choose the one that performs the best, based on k-fold cross validation. MIPLearn is also able to automatically collect features based on the MIP formulation of the problem and the solution to the LP relaxation, among other things, so it does not require handcrafted features. If you do want to customize the models and features, however, that is also possible, as we will see in a later tutorial.\n", - "
" + "In the log above, the `MIP start` line is missing, and Gurobi had to start with a significantly inferior initial solution. The solver was still able to find the optimal solution at the end, but it required using its own internal heuristic procedures. In this example, because we solve very small optimization problems, there was almost no difference in terms of running time, but the difference can be significant for larger problems." ] }, { @@ -564,32 +731,109 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "id": "67a6cd18", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-06T20:06:26.913448568Z", + "start_time": "2023-06-06T20:06:26.169047914Z" + } + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "obj = 903865807.3536932\n", - " x = [1.0, 1.0, 1.0, 1.0, 1.0]\n", - " y = [1105176.593734543, 1891284.5155055337, 1708177.4224033852, 1438329.610189608, 535496.3347187206]\n" + "Set parameter QCPDual to value 1\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x2dfe4e1c\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "Presolve removed 1000 rows and 500 columns\n", + "Presolve time: 0.01s\n", + "Presolved: 1 rows, 500 columns, 500 nonzeros\n", + "\n", + "Iteration Objective Primal Inf. Dual Inf. Time\n", + " 0 6.5917580e+09 5.627453e+04 0.000000e+00 0s\n", + " 1 8.2535968e+09 0.000000e+00 0.000000e+00 0s\n", + "\n", + "Solved in 1 iterations and 0.01 seconds (0.00 work units)\n", + "Optimal objective 8.253596777e+09\n", + "Set parameter QCPDual to value 1\n", + "Gurobi Optimizer version 10.0.1 build v10.0.1rc0 (linux64)\n", + "\n", + "CPU model: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, instruction set [SSE2|AVX|AVX2]\n", + "Thread count: 6 physical cores, 12 logical processors, using up to 12 threads\n", + "\n", + "Optimize a model with 1001 rows, 1000 columns and 2500 nonzeros\n", + "Model fingerprint: 0x20637200\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "Coefficient statistics:\n", + " Matrix range [1e+00, 2e+06]\n", + " Objective range [1e+00, 6e+07]\n", + " Bounds range [1e+00, 1e+00]\n", + " RHS range [3e+08, 3e+08]\n", + "\n", + "User MIP start produced solution with objective 8.25814e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.25512e+09 (0.01s)\n", + "User MIP start produced solution with objective 8.25459e+09 (0.04s)\n", + "User MIP start produced solution with objective 8.25459e+09 (0.04s)\n", + "Loaded user MIP start with objective 8.25459e+09\n", + "\n", + "Presolve time: 0.01s\n", + "Presolved: 1001 rows, 1000 columns, 2500 nonzeros\n", + "Variable types: 500 continuous, 500 integer (500 binary)\n", + "\n", + "Root relaxation: objective 8.253597e+09, 512 iterations, 0.00 seconds (0.00 work units)\n", + "\n", + " Nodes | Current Node | Objective Bounds | Work\n", + " Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time\n", + "\n", + " 0 0 8.2536e+09 0 1 8.2546e+09 8.2536e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 3 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 1 8.2546e+09 8.2537e+09 0.01% - 
0s\n", + " 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2537e+09 0 4 8.2546e+09 8.2537e+09 0.01% - 0s\n", + " 0 0 8.2538e+09 0 4 8.2546e+09 8.2538e+09 0.01% - 0s\n", + " 0 0 8.2538e+09 0 5 8.2546e+09 8.2538e+09 0.01% - 0s\n", + " 0 0 8.2538e+09 0 6 8.2546e+09 8.2538e+09 0.01% - 0s\n", + "\n", + "Cutting planes:\n", + " Cover: 1\n", + " MIR: 2\n", + " StrongCG: 1\n", + " Flow cover: 1\n", + "\n", + "Explored 1 nodes (575 simplex iterations) in 0.11 seconds (0.01 work units)\n", + "Thread count was 12 (of 12 available processors)\n", + "\n", + "Solution count 3: 8.25459e+09 8.25512e+09 8.25814e+09 \n", + "\n", + "Optimal solution found (tolerance 1.00e-04)\n", + "Best objective 8.254590409970e+09, best bound 8.253768093811e+09, gap 0.0100%\n", + "WARNING: Cannot get reduced costs for MIP.\n", + "WARNING: Cannot get duals for MIP.\n", + "obj = 8254590409.96973\n", + " x = [1.0, 1.0, 0.0, 1.0, 1.0]\n", + " y = [935662.0949263407, 1604270.0218116897, 0.0, 1369560.835229226, 602828.5321028307]\n" ] } ], "source": [ - "# Construct model using previously defined functions\n", - "data = random_uc_data(samples=1, n=50)[0]\n", + "data = random_uc_data(samples=1, n=500)[0]\n", "model = build_uc_model(data)\n", - "\n", - "# Solve model using ML + Gurobi\n", - "solver_ml.solve(model)\n", - "\n", - "# Print part of the optimal solution\n", - "print(\"obj =\", model.obj())\n", - "print(\" x =\", [model.x[i].value for i in range(5)])\n", - "print(\" y =\", [model.y[i].value for i in range(5)])" + "solver_ml.optimize(model)\n", + "print(\"obj =\", model.inner.obj())\n", + "print(\" x =\", [model.inner.x[i].value for i in range(5)])\n", + "print(\" y =\", [model.inner.y[i].value for i in range(5)])" ] }, { @@ -603,7 +847,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/0.3/_static/__pycache__/__init__.cpython-39.pyc b/0.3/_static/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..82499f4 Binary files /dev/null and b/0.3/_static/__pycache__/__init__.cpython-39.pyc differ diff --git a/0.3/_static/nbsphinx-broken-thumbnail.svg b/0.3/_static/nbsphinx-broken-thumbnail.svg new file mode 100644 index 0000000..4919ca8 --- /dev/null +++ b/0.3/_static/nbsphinx-broken-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/0.3/_static/nbsphinx-code-cells.css b/0.3/_static/nbsphinx-code-cells.css new file mode 100644 index 0000000..a3fb27c --- /dev/null +++ b/0.3/_static/nbsphinx-code-cells.css @@ -0,0 +1,259 @@ +/* remove conflicting styling from Sphinx themes */ +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt *, +div.nbinput.container div.input_area pre, +div.nboutput.container div.output_area pre, +div.nbinput.container div.input_area .highlight, +div.nboutput.container div.output_area .highlight { + border: none; + padding: 0; + margin: 0; + box-shadow: none; +} + +div.nbinput.container > div[class*=highlight], +div.nboutput.container > div[class*=highlight] { + margin: 0; +} + +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt * { + background: none; +} + +div.nboutput.container div.output_area .highlight, +div.nboutput.container div.output_area pre { + background: unset; +} + +div.nboutput.container div.output_area div.highlight { + color: unset; /* override Pygments text color */ +} + +/* avoid gaps between output lines */ +div.nboutput.container div[class*=highlight] pre { + line-height: normal; +} + +/* input/output 
containers */ +div.nbinput.container, +div.nboutput.container { + display: -webkit-flex; + display: flex; + align-items: flex-start; + margin: 0; + width: 100%; +} +@media (max-width: 540px) { + div.nbinput.container, + div.nboutput.container { + flex-direction: column; + } +} + +/* input container */ +div.nbinput.container { + padding-top: 5px; +} + +/* last container */ +div.nblast.container { + padding-bottom: 5px; +} + +/* input prompt */ +div.nbinput.container div.prompt pre, +/* for sphinx_immaterial theme: */ +div.nbinput.container div.prompt pre > code { + color: #307FC1; +} + +/* output prompt */ +div.nboutput.container div.prompt pre, +/* for sphinx_immaterial theme: */ +div.nboutput.container div.prompt pre > code { + color: #BF5B3D; +} + +/* all prompts */ +div.nbinput.container div.prompt, +div.nboutput.container div.prompt { + width: 4.5ex; + padding-top: 5px; + position: relative; + user-select: none; +} + +div.nbinput.container div.prompt > div, +div.nboutput.container div.prompt > div { + position: absolute; + right: 0; + margin-right: 0.3ex; +} + +@media (max-width: 540px) { + div.nbinput.container div.prompt, + div.nboutput.container div.prompt { + width: unset; + text-align: left; + padding: 0.4em; + } + div.nboutput.container div.prompt.empty { + padding: 0; + } + + div.nbinput.container div.prompt > div, + div.nboutput.container div.prompt > div { + position: unset; + } +} + +/* disable scrollbars and line breaks on prompts */ +div.nbinput.container div.prompt pre, +div.nboutput.container div.prompt pre { + overflow: hidden; + white-space: pre; +} + +/* input/output area */ +div.nbinput.container div.input_area, +div.nboutput.container div.output_area { + -webkit-flex: 1; + flex: 1; + overflow: auto; +} +@media (max-width: 540px) { + div.nbinput.container div.input_area, + div.nboutput.container div.output_area { + width: 100%; + } +} + +/* input area */ +div.nbinput.container div.input_area { + border: 1px solid #e0e0e0; + border-radius: 2px; + /*background: #f5f5f5;*/ +} + +/* override MathJax center alignment in output cells */ +div.nboutput.container div[class*=MathJax] { + text-align: left !important; +} + +/* override sphinx.ext.imgmath center alignment in output cells */ +div.nboutput.container div.math p { + text-align: left; +} + +/* standard error */ +div.nboutput.container div.output_area.stderr { + background: #fdd; +} + +/* ANSI colors */ +.ansi-black-fg { color: #3E424D; } +.ansi-black-bg { background-color: #3E424D; } +.ansi-black-intense-fg { color: #282C36; } +.ansi-black-intense-bg { background-color: #282C36; } +.ansi-red-fg { color: #E75C58; } +.ansi-red-bg { background-color: #E75C58; } +.ansi-red-intense-fg { color: #B22B31; } +.ansi-red-intense-bg { background-color: #B22B31; } +.ansi-green-fg { color: #00A250; } +.ansi-green-bg { background-color: #00A250; } +.ansi-green-intense-fg { color: #007427; } +.ansi-green-intense-bg { background-color: #007427; } +.ansi-yellow-fg { color: #DDB62B; } +.ansi-yellow-bg { background-color: #DDB62B; } +.ansi-yellow-intense-fg { color: #B27D12; } +.ansi-yellow-intense-bg { background-color: #B27D12; } +.ansi-blue-fg { color: #208FFB; } +.ansi-blue-bg { background-color: #208FFB; } +.ansi-blue-intense-fg { color: #0065CA; } +.ansi-blue-intense-bg { background-color: #0065CA; } +.ansi-magenta-fg { color: #D160C4; } +.ansi-magenta-bg { background-color: #D160C4; } +.ansi-magenta-intense-fg { color: #A03196; } +.ansi-magenta-intense-bg { background-color: #A03196; } +.ansi-cyan-fg { color: #60C6C8; } 
+.ansi-cyan-bg { background-color: #60C6C8; } +.ansi-cyan-intense-fg { color: #258F8F; } +.ansi-cyan-intense-bg { background-color: #258F8F; } +.ansi-white-fg { color: #C5C1B4; } +.ansi-white-bg { background-color: #C5C1B4; } +.ansi-white-intense-fg { color: #A1A6B2; } +.ansi-white-intense-bg { background-color: #A1A6B2; } + +.ansi-default-inverse-fg { color: #FFFFFF; } +.ansi-default-inverse-bg { background-color: #000000; } + +.ansi-bold { font-weight: bold; } +.ansi-underline { text-decoration: underline; } + + +div.nbinput.container div.input_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight].math, +div.nboutput.container div.output_area.rendered_html, +div.nboutput.container div.output_area > div.output_javascript, +div.nboutput.container div.output_area:not(.rendered_html) > img{ + padding: 5px; + margin: 0; +} + +/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ +div.nbinput.container div.input_area > div[class^='highlight'], +div.nboutput.container div.output_area > div[class^='highlight']{ + overflow-y: hidden; +} + +/* hide copy button on prompts for 'sphinx_copybutton' extension ... */ +.prompt .copybtn, +/* ... and 'sphinx_immaterial' theme */ +.prompt .md-clipboard.md-icon { + display: none; +} + +/* Some additional styling taken form the Jupyter notebook CSS */ +.jp-RenderedHTMLCommon table, +div.rendered_html table { + border: none; + border-collapse: collapse; + border-spacing: 0; + color: black; + font-size: 12px; + table-layout: fixed; +} +.jp-RenderedHTMLCommon thead, +div.rendered_html thead { + border-bottom: 1px solid black; + vertical-align: bottom; +} +.jp-RenderedHTMLCommon tr, +.jp-RenderedHTMLCommon th, +.jp-RenderedHTMLCommon td, +div.rendered_html tr, +div.rendered_html th, +div.rendered_html td { + text-align: right; + vertical-align: middle; + padding: 0.5em 0.5em; + line-height: normal; + white-space: normal; + max-width: none; + border: none; +} +.jp-RenderedHTMLCommon th, +div.rendered_html th { + font-weight: bold; +} +.jp-RenderedHTMLCommon tbody tr:nth-child(odd), +div.rendered_html tbody tr:nth-child(odd) { + background: #f5f5f5; +} +.jp-RenderedHTMLCommon tbody tr:hover, +div.rendered_html tbody tr:hover { + background: rgba(66, 165, 245, 0.2); +} + diff --git a/0.3/_static/nbsphinx-gallery.css b/0.3/_static/nbsphinx-gallery.css new file mode 100644 index 0000000..365c27a --- /dev/null +++ b/0.3/_static/nbsphinx-gallery.css @@ -0,0 +1,31 @@ +.nbsphinx-gallery { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); + gap: 5px; + margin-top: 1em; + margin-bottom: 1em; +} + +.nbsphinx-gallery > a { + padding: 5px; + border: 1px dotted currentColor; + border-radius: 2px; + text-align: center; +} + +.nbsphinx-gallery > a:hover { + border-style: solid; +} + +.nbsphinx-gallery img { + max-width: 100%; + max-height: 100%; +} + +.nbsphinx-gallery > a > div:first-child { + display: flex; + align-items: start; + justify-content: center; + height: 120px; + margin-bottom: 5px; +} diff --git a/0.3/_static/nbsphinx-no-thumbnail.svg b/0.3/_static/nbsphinx-no-thumbnail.svg new file mode 100644 index 0000000..9dca758 --- /dev/null +++ b/0.3/_static/nbsphinx-no-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/0.3/_static/pygments.css b/0.3/_static/pygments.css index f227e5c..c5d3a66 100644 --- a/0.3/_static/pygments.css +++ b/0.3/_static/pygments.css @@ -54,7 +54,6 @@ span.linenos.special { color: 
#000000; background-color: #ffffc0; padding-left: .highlight .nt { color: #204a87; font-weight: bold } /* Name.Tag */ .highlight .nv { color: #000000 } /* Name.Variable */ .highlight .ow { color: #204a87; font-weight: bold } /* Operator.Word */ -.highlight .pm { color: #000000; font-weight: bold } /* Punctuation.Marker */ .highlight .w { color: #f8f8f8 } /* Text.Whitespace */ .highlight .mb { color: #0000cf; font-weight: bold } /* Literal.Number.Bin */ .highlight .mf { color: #0000cf; font-weight: bold } /* Literal.Number.Float */ diff --git a/0.3/api/collectors/index.html b/0.3/api/collectors/index.html index bf0ce05..844872e 100644 --- a/0.3/api/collectors/index.html +++ b/0.3/api/collectors/index.html @@ -5,7 +5,7 @@ - 7. Collectors & Extractors — MIPLearn 0.3 + 10. Collectors & Extractors — MIPLearn 0.3 @@ -36,8 +36,8 @@ - - + + @@ -66,6 +66,28 @@