Mirror of https://github.com/ANL-CEEESA/MIPLearn.git
Make package work with persistent solvers; update README.md
This commit is contained in:
49 README.md

@@ -5,22 +5,23 @@ MIPLearn

Table of contents
-----------------
* [Features](#features)
* [Installation](#installation)
* [Basic usage](#basic-usage)
* [Features](#features)
* [Installation](#installation)
* [Basic Usage](#basic-usage)
* [Using LearningSolver](#using-learningsolver)
* [Selecting the internal MIP solver](#selecting-the-internal-mip-solver)
* [Describing problem instances](#describing-problem-instances)
* [Obtaining heuristic solutions](#obtaining-heuristic-solutions)
* [Saving and loading solver state](#saving-and-loading-solver-state)
* [Solving training instances in parallel](#solving-training-instances-in-parallel)
* [Benchmarking](#benchmarking)
* [Benchmarking](#benchmarking)
* [Using BenchmarkRunner](#using-benchmarkrunner)
* [Saving and loading benchmark results](#saving-and-loading-benchmark-results)
* [Current Limitations](#current-limitations)
* [References](#references)
* [Authors](#authors)
* [License](#license)
* [Customization](#customization)
* [Selecting the internal MIP solver](#selecting-the-internal-mip-solver)
* [Current Limitations](#current-limitations)
* [References](#references)
* [Authors](#authors)
* [License](#license)

Features
--------

@@ -60,17 +61,6 @@ for instance in all_instances:

During the first call to `solver.solve(instance)`, the solver will process the instance from scratch, since no historical information is available, but it will already start gathering information. By calling `solver.fit()`, we instruct the solver to train all the internal Machine Learning models based on the information gathered so far. As this operation can be expensive, it may be performed after a larger batch of instances has been solved, instead of after every solve. After the first call to `solver.fit()`, subsequent calls to `solver.solve(instance)` will automatically use the trained Machine Learning models to accelerate the solution process.
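
As a concrete illustration of this workflow, here is a minimal sketch (assuming `all_instances` holds the training instances, as in the README example referenced in the hunk header above; `new_instance` is a placeholder for a later instance of the same problem family):

```python
from miplearn import LearningSolver

solver = LearningSolver()

# First pass: no history is available yet, so every instance is solved
# from scratch while the solver gathers training data.
for instance in all_instances:
    solver.solve(instance)

# Train the internal Machine Learning models on the data gathered so far;
# this can be done after a batch of solves rather than after every solve.
solver.fit()

# Subsequent solves use the trained models to accelerate the process.
solver.solve(new_instance)
```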

### Selecting the internal MIP solver

By default, `LearningSolver` uses Gurobi as its internal MIP solver. Alternative solvers can be specified through the `parent_solver` argument. To select CPLEX, for example:
```python
from miplearn import LearningSolver
import pyomo.environ as pe

cplex = pe.SolverFactory("cplex")
solver = LearningSolver(parent_solver=cplex)
```

### Describing problem instances

Instances to be solved by `LearningSolver` must derive from the abstract class `miplearn.Instance`. The following three abstract methods must be implemented:
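
The method list itself is not shown in this diff hunk; purely as an illustrative sketch (the method names and signatures below, `to_model`, `get_instance_features` and `get_variable_features`, are assumptions about the MIPLearn API of this era and should be checked against the full README), a small knapsack instance might look like:

```python
import numpy as np
import pyomo.environ as pe
from miplearn import Instance


class MyKnapsackInstance(Instance):
    """Illustrative only; see the full README for the authoritative API."""

    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        # Return the concrete Pyomo model for this particular instance.
        items = range(len(self.weights))
        model = pe.ConcreteModel()
        model.x = pe.Var(items, domain=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(self.prices[i] * model.x[i] for i in items),
            sense=pe.maximize,
        )
        model.cap = pe.Constraint(
            expr=sum(self.weights[i] * model.x[i] for i in items) <= self.capacity,
        )
        return model

    def get_instance_features(self):
        # 1-D numerical feature vector describing the whole instance.
        return np.array([self.capacity, float(np.mean(self.prices))])

    def get_variable_features(self, var, index):
        # 1-D numerical feature vector describing one decision variable.
        return np.array([self.weights[index], self.prices[index]])
```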

@@ -198,6 +188,25 @@ benchmark.load_fit("training_data.bin")
benchmark.parallel_solve(test_instances)
```

Customization
-------------

### Selecting the internal MIP solver

By default, `LearningSolver` uses [Gurobi](https://www.gurobi.com/) as its internal MIP solver. Alternative solvers can be specified through the `internal_solver_factory` constructor argument. This argument should be a function (with no arguments) that constructs, configures and returns the desired solver. To select CPLEX, for example:
```python
from miplearn import LearningSolver
import pyomo.environ as pe

def cplex_factory():
    cplex = pe.SolverFactory("cplex_persistent")
    cplex.options["threads"] = 4
    return cplex

solver = LearningSolver(internal_solver_factory=cplex_factory)
```

Current Limitations
-------------------

@@ -13,6 +13,10 @@ from joblib import Parallel, delayed

import multiprocessing


def _gurobi_factory():
    solver = pe.SolverFactory('gurobi_persistent')
    solver.options["threads"] = 4
    return solver


class LearningSolver:
    """
@@ -22,11 +26,11 @@ class LearningSolver:
|
||||
|
||||
def __init__(self,
|
||||
threads=4,
|
||||
parent_solver=pe.SolverFactory('gurobi'),
|
||||
internal_solver_factory=_gurobi_factory,
|
||||
ws_predictor=KnnWarmStartPredictor(),
|
||||
mode="exact"):
|
||||
self.parent_solver = parent_solver
|
||||
self.parent_solver.options["threads"] = threads
|
||||
self.internal_solver_factory = internal_solver_factory
|
||||
self.internal_solver = self.internal_solver_factory()
|
||||
self.mode = mode
|
||||
self.x_train = {}
|
||||
self.y_train = {}
|
||||
@@ -86,8 +90,11 @@ class LearningSolver:
|
||||
return solve_results
|
||||
|
||||
def parallel_solve(self, instances, n_jobs=4, label="Solve"):
|
||||
self.parentSolver = None
|
||||
|
||||
def _process(instance):
|
||||
solver = deepcopy(self)
|
||||
solver = copy(self)
|
||||
solver.internal_solver = solver.internal_solver_factory()
|
||||
results = solver.solve(instance)
|
||||
return {
|
||||
"x_train": solver.x_train,
|
||||
@@ -143,8 +150,8 @@ class LearningSolver:
|
||||
self.ws_predictors = self.ws_predictors
|
||||
|
||||
def _solve(self, model, tee=False):
|
||||
if hasattr(self.parent_solver, "set_instance"):
|
||||
self.parent_solver.set_instance(model)
|
||||
return self.parent_solver.solve(tee=tee, warmstart=True)
|
||||
if hasattr(self.internal_solver, "set_instance"):
|
||||
self.internal_solver.set_instance(model)
|
||||
return self.internal_solver.solve(tee=tee, warmstart=True)
|
||||
else:
|
||||
return self.parent_solver.solve(model, tee=tee, warmstart=True)
|
||||
return self.internal_solver.solve(model, tee=tee, warmstart=True)
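
The `_solve` change above follows Pyomo's persistent-solver convention: solvers that expose `set_instance` are bound to the model first and then solved without passing the model again, while non-persistent interfaces receive the model directly in `solve()`. A minimal standalone sketch of that dispatch pattern, mirroring the calls used in the diff (the `solve_with` helper and the toy model below are illustrative, not part of this commit, and assume the Gurobi persistent plugin is available):

```python
import pyomo.environ as pe


def solve_with(solver, model, tee=False):
    # Persistent interfaces (e.g. "gurobi_persistent", "cplex_persistent")
    # must be bound to the model via set_instance() before solving.
    if hasattr(solver, "set_instance"):
        solver.set_instance(model)
        return solver.solve(tee=tee, warmstart=True)
    # Non-persistent interfaces take the model directly.
    return solver.solve(model, tee=tee, warmstart=True)


# Illustrative toy model: maximize x with x an integer in [0, 10].
model = pe.ConcreteModel()
model.x = pe.Var(within=pe.NonNegativeIntegers, bounds=(0, 10))
model.obj = pe.Objective(expr=model.x, sense=pe.maximize)

results = solve_with(pe.SolverFactory("gurobi_persistent"), model)
```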