Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 09:28:51 -06:00)
Make package work with persistent solvers; update README.md
README.md (35 changed lines)
@@ -7,9 +7,8 @@ Table of contents
 -----------------
 * [Features](#features)
 * [Installation](#installation)
-* [Basic usage](#basic-usage)
+* [Basic Usage](#basic-usage)
 * [Using LearningSolver](#using-learningsolver)
-* [Selecting the internal MIP solver](#selecting-the-internal-mip-solver)
 * [Describing problem instances](#describing-problem-instances)
 * [Obtaining heuristic solutions](#obtaining-heuristic-solutions)
 * [Saving and loading solver state](#saving-and-loading-solver-state)
@@ -17,6 +16,8 @@ Table of contents
 * [Benchmarking](#benchmarking)
 * [Using BenchmarkRunner](#using-benchmarkrunner)
 * [Saving and loading benchmark results](#saving-and-loading-benchmark-results)
+* [Customization](#customization)
+* [Selecting the internal MIP solver](#selecting-the-internal-mip-solver)
 * [Current Limitations](#current-limitations)
 * [References](#references)
 * [Authors](#authors)
@@ -60,17 +61,6 @@ for instance in all_instances:
 
 During the first call to `solver.solve(instance)`, the solver will process the instance from scratch, since no historical information is available, but it will already start gathering information. By calling `solver.fit()`, we instruct the solver to train all the internal Machine Learning models based on the information gathered so far. As this operation can be expensive, it may be performed after a larger batch of instances has been solved, instead of after every solve. After the first call to `solver.fit()`, subsequent calls to `solver.solve(instance)` will automatically use the trained Machine Learning models to accelerate the solution process.
 
-### Selecting the internal MIP solver
-
-By default, `LearningSolver` uses Gurobi as its internal MIP solver. Alternative solvers can be specified through the `parent_solver` argument, as follows. To select CPLEX, for example:
-```python
-from miplearn import LearningSolver
-import pyomo.environ as pe
-
-cplex = pe.SolverFactory("cplex")
-solver = LearningSolver(parent_solver=cplex)
-```
-
 ### Describing problem instances
 
 Instances to be solved by `LearningSolver` must derive from the abstract class `miplearn.Instance`. The following three abstract methods must be implemented:
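The unchanged paragraph above describes the solve-then-fit workflow. A minimal editorial sketch of that loop (not part of the commit; `training_instances` and `test_instances` are hypothetical lists of `miplearn.Instance` objects, and a supported MIP solver is assumed to be installed):

```python
from miplearn import LearningSolver

solver = LearningSolver()
for instance in training_instances:
    solver.solve(instance)   # solved from scratch; training data is gathered
solver.fit()                 # train the internal ML models on the gathered data
for instance in test_instances:
    solver.solve(instance)   # subsequent solves use the trained models
```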
@@ -198,6 +188,25 @@ benchmark.load_fit("training_data.bin")
 benchmark.parallel_solve(test_instances)
 ```
 
+Customization
+-------------
+
+### Selecting the internal MIP solver
+
+By default, `LearningSolver` uses [Gurobi](https://www.gurobi.com/) as its internal MIP solver. Alternative solvers can be specified through the `internal_solver_factory` constructor argument. This argument should provide a function (with no arguments) that constructs, configures and returns the desired solver. To select CPLEX, for example:
+```python
+from miplearn import LearningSolver
+import pyomo.environ as pe
+
+def cplex_factory():
+    cplex = pe.SolverFactory("cplex_persistent")
+    cplex.options["threads"] = 4
+    return cplex
+
+solver = LearningSolver(internal_solver_factory=cplex_factory)
+```
+
+
 Current Limitations
 -------------------
 
@@ -13,6 +13,10 @@ from joblib import Parallel, delayed
 import multiprocessing
 
 
+def _gurobi_factory():
+    solver = pe.SolverFactory('gurobi_persistent')
+    solver.options["threads"] = 4
+    return solver
 
 class LearningSolver:
     """
@@ -22,11 +26,11 @@ class LearningSolver:
 
     def __init__(self,
                  threads=4,
-                 parent_solver=pe.SolverFactory('gurobi'),
+                 internal_solver_factory=_gurobi_factory,
                  ws_predictor=KnnWarmStartPredictor(),
                  mode="exact"):
-        self.parent_solver = parent_solver
-        self.parent_solver.options["threads"] = threads
+        self.internal_solver_factory = internal_solver_factory
+        self.internal_solver = self.internal_solver_factory()
         self.mode = mode
         self.x_train = {}
         self.y_train = {}
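A side note on this signature change (editorial, not from the commit message): Python evaluates default arguments once, at function definition time, so the old `parent_solver=pe.SolverFactory('gurobi')` default produced a single solver object shared by every `LearningSolver` created with defaults, whereas the new factory default builds a fresh solver inside each `__init__` call. A self-contained sketch of the difference, using a stand-in factory instead of a real Pyomo solver:

```python
def make_solver():
    return object()  # stand-in for pe.SolverFactory("gurobi_persistent")

class Old:
    def __init__(self, solver=make_solver()):        # default built once, shared
        self.solver = solver

class New:
    def __init__(self, solver_factory=make_solver):  # factory called per instance
        self.solver = solver_factory()

assert Old().solver is Old().solver        # same object every time
assert New().solver is not New().solver    # a fresh object per instance
```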
@@ -86,8 +90,11 @@ class LearningSolver:
         return solve_results
 
     def parallel_solve(self, instances, n_jobs=4, label="Solve"):
+        self.parentSolver = None
+
         def _process(instance):
-            solver = deepcopy(self)
+            solver = copy(self)
+            solver.internal_solver = solver.internal_solver_factory()
             results = solver.solve(instance)
             return {
                 "x_train": solver.x_train,
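The pattern above, shallow-copying the `LearningSolver` and rebuilding only `internal_solver` from the factory, presumably lets each parallel worker avoid deep-copying a persistent solver handle (which wraps a native Gurobi/CPLEX model) while still sharing the gathered training data. An editorial sketch of the idea, with a stand-in factory so it runs without any MIP solver installed:

```python
from copy import copy

def fake_factory():
    return object()   # stand-in for a persistent MIP solver handle

class Parent:
    def __init__(self, solver_factory):
        self.internal_solver_factory = solver_factory
        self.internal_solver = solver_factory()
        self.x_train = {}

parent = Parent(fake_factory)
worker = copy(parent)                                      # shallow copy
worker.internal_solver = worker.internal_solver_factory()  # fresh solver per worker

assert worker.x_train is parent.x_train             # training data still shared
assert worker.internal_solver is not parent.internal_solver
```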
@@ -143,8 +150,8 @@ class LearningSolver:
         self.ws_predictors = self.ws_predictors
 
     def _solve(self, model, tee=False):
-        if hasattr(self.parent_solver, "set_instance"):
-            self.parent_solver.set_instance(model)
-            return self.parent_solver.solve(tee=tee, warmstart=True)
+        if hasattr(self.internal_solver, "set_instance"):
+            self.internal_solver.set_instance(model)
+            return self.internal_solver.solve(tee=tee, warmstart=True)
         else:
-            return self.parent_solver.solve(model, tee=tee, warmstart=True)
+            return self.internal_solver.solve(model, tee=tee, warmstart=True)
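The `hasattr(..., "set_instance")` check dispatches between Pyomo's two solver calling conventions: persistent interfaces keep the model loaded in the underlying solver and are driven through `set_instance()` followed by `solve()` with no model argument, while classic interfaces receive the model on every call. A small editorial sketch (assuming Pyomo plus a persistent Gurobi interface and some classic solver such as GLPK are installed):

```python
import pyomo.environ as pe

model = pe.ConcreteModel()
model.x = pe.Var(bounds=(0, 10))
model.obj = pe.Objective(expr=model.x, sense=pe.maximize)

persistent = pe.SolverFactory("gurobi_persistent")
persistent.set_instance(model)      # load the model into the solver once
persistent.solve(tee=False)         # later solves reuse the loaded model

classic = pe.SolverFactory("glpk")
classic.solve(model, tee=False)     # the model is passed on every call
```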