Merge branch 'feature/training_sample' into dev

master · Alinson S. Xavier · 5 years ago · commit 05497cab07

@ -3,3 +3,4 @@ ignore_missing_imports = True
#disallow_untyped_defs = True
disallow_untyped_calls = True
disallow_incomplete_defs = True
pretty = True

@ -2,37 +2,31 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from .extractors import (
SolutionExtractor,
InstanceFeaturesExtractor,
ObjectiveValueExtractor,
VariableFeaturesExtractor,
)
from .benchmark import BenchmarkRunner
from .classifiers import Classifier, Regressor
from .classifiers.adaptive import AdaptiveClassifier
from .classifiers.threshold import MinPrecisionThreshold
from .components.component import Component
from .components.objective import ObjectiveValueComponent
from .components.cuts import UserCutsComponent
from .components.lazy_dynamic import DynamicLazyConstraintsComponent
from .components.lazy_static import StaticLazyConstraintsComponent
from .components.cuts import UserCutsComponent
from .components.objective import ObjectiveValueComponent
from .components.primal import PrimalSolutionComponent
from .components.relaxation import RelaxationComponent
from .components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from .components.steps.relax_integrality import RelaxIntegralityStep
from .components.steps.drop_redundant import DropRedundantInequalitiesStep
from .classifiers import Classifier, Regressor
from .classifiers.adaptive import AdaptiveClassifier
from .classifiers.threshold import MinPrecisionThreshold
from .benchmark import BenchmarkRunner
from .components.steps.relax_integrality import RelaxIntegralityStep
from .extractors import (
SolutionExtractor,
InstanceFeaturesExtractor,
ObjectiveValueExtractor,
VariableFeaturesExtractor,
)
from .instance import Instance
from .solvers.pyomo.base import BasePyomoSolver
from .solvers.pyomo.cplex import CplexPyomoSolver
from .solvers.pyomo.gurobi import GurobiPyomoSolver
from .log import setup_logger
from .solvers.gurobi import GurobiSolver
from .solvers.internal import InternalSolver
from .solvers.learning import LearningSolver
from .log import setup_logger
from .solvers.pyomo.base import BasePyomoSolver
from .solvers.pyomo.cplex import CplexPyomoSolver
from .solvers.pyomo.gurobi import GurobiPyomoSolver

@ -2,15 +2,14 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import os
from copy import deepcopy
import pandas as pd
import numpy as np
import logging
from tqdm.auto import tqdm
import os
from .solvers.learning import LearningSolver
from miplearn.solvers.learning import LearningSolver
class BenchmarkRunner:

@ -5,14 +5,15 @@
import logging
from copy import deepcopy
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.evaluator import ClassifierEvaluator
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.evaluator import ClassifierEvaluator
logger = logging.getLogger(__name__)

@ -2,9 +2,10 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn.classifiers import Classifier
import numpy as np
from miplearn.classifiers import Classifier
class CountingClassifier(Classifier):
"""

@ -2,15 +2,15 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from copy import deepcopy
import numpy as np
from miplearn.classifiers import Classifier
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import logging
from miplearn.classifiers import Classifier
logger = logging.getLogger(__name__)

@ -1,11 +1,12 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn.classifiers.counting import CountingClassifier
import numpy as np
from numpy.linalg import norm
from miplearn.classifiers.counting import CountingClassifier
E = 0.1

@ -3,11 +3,12 @@
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
from miplearn.classifiers.cv import CrossValidatedClassifier
from numpy.linalg import norm
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from miplearn.classifiers.cv import CrossValidatedClassifier
E = 0.1

@ -3,9 +3,10 @@
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
from miplearn.classifiers.evaluator import ClassifierEvaluator
from sklearn.neighbors import KNeighborsClassifier
from miplearn.classifiers.evaluator import ClassifierEvaluator
def test_evaluator():
clf_a = KNeighborsClassifier(n_neighbors=1)

@ -5,6 +5,7 @@
from unittest.mock import Mock
import numpy as np
from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import MinPrecisionThreshold

@ -2,7 +2,7 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import Component
from miplearn.components.component import Component
class CompositeComponent(Component):

@ -2,14 +2,17 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import sys
from copy import deepcopy
import numpy as np
from tqdm.auto import tqdm
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from .component import Component
from ..extractors import *
from miplearn.components.component import Component
from miplearn.extractors import InstanceFeaturesExtractor
logger = logging.getLogger(__name__)

@ -2,14 +2,17 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import sys
from copy import deepcopy
import numpy as np
from tqdm.auto import tqdm
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from .component import Component
from ..extractors import *
from miplearn.components.component import Component
from miplearn.extractors import InstanceFeaturesExtractor, InstanceIterator
logger = logging.getLogger(__name__)

@ -2,12 +2,15 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import sys
from copy import deepcopy
import numpy as np
from tqdm.auto import tqdm
from miplearn.classifiers.counting import CountingClassifier
from .component import Component
from ..extractors import *
from miplearn.components.component import Component
logger = logging.getLogger(__name__)

@ -1,6 +1,12 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from copy import deepcopy
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import (
mean_squared_error,
explained_variance_score,
@ -9,11 +15,8 @@ from sklearn.metrics import (
r2_score,
)
from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
from sklearn.linear_model import LinearRegression
from copy import deepcopy
import numpy as np
import logging
from miplearn.components.component import Component
from miplearn.extractors import InstanceFeaturesExtractor, ObjectiveValueExtractor
logger = logging.getLogger(__name__)
@ -75,7 +78,15 @@ class ObjectiveValueComponent(Component):
def evaluate(self, instances):
y_pred = self.predict(instances)
y_true = np.array([[inst.lower_bound, inst.upper_bound] for inst in instances])
y_true = np.array(
[
[
inst.training_data[0]["Lower bound"],
inst.training_data[0]["Upper bound"],
]
for inst in instances
]
)
y_true_lb, y_true_ub = y_true[:, 0], y_true[:, 1]
y_pred_lb, y_pred_ub = y_pred[:, 0], y_pred[:, 1]
ev = {
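For reference, the `training_data` list consulted here holds one `TrainingSample` dict per solve (the TypedDict added in `miplearn/types.py` at the bottom of this diff). Using the values asserted in the tests further down, a sample looks roughly like:

    # Hypothetical contents of instance.training_data[0] after one solve;
    # keys follow the TrainingSample TypedDict from miplearn/types.py.
    sample = {
        "Lower bound": 1183.0,
        "Upper bound": 1183.0,
        "LP value": 1287.923,
        "LP solution": {"x": {0: 1.0, 1: 0.923, 2: 1.0, 3: 0.0}},
        "Solution": {"x": {0: 1.0, 1: 0.0, 2: 1.0, 3: 1.0}},
    }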

@ -68,7 +68,8 @@ class PrimalSolutionComponent(Component):
for label in [0, 1]:
y_train = solutions[category][:, label].astype(int)
# If all samples are either positive or negative, make constant predictions
# If all samples are either positive or negative, make constant
# predictions
y_avg = np.average(y_train)
if y_avg < 0.001 or y_avg >= 0.999:
self.classifiers[category, label] = round(y_avg)
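Note that the constant branch stores a plain number (`round(y_avg)`) where a classifier object is otherwise expected, so any prediction path has to special-case it. A minimal sketch of that idea (the helper name and array shapes are assumptions, not the actual `PrimalSolutionComponent` code):

    import numpy as np

    def predict_proba_or_constant(clf, x_test):
        # `clf` is either a fitted classifier or the constant 0/1 stored
        # above when every training label agrees.
        if isinstance(clf, (int, float)):
            return np.full((x_test.shape[0], 2), [1.0 - clf, clf])
        return clf.predict_proba(x_test)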
@ -130,7 +131,7 @@ class PrimalSolutionComponent(Component):
desc="Evaluate (primal)",
):
instance = instances[instance_idx]
solution_actual = instance.solution
solution_actual = instance.training_data[0]["Solution"]
solution_pred = self.predict(instance)
vars_all, vars_one, vars_zero = set(), set(), set()

@ -4,8 +4,8 @@
import logging
from miplearn import Component
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components.component import Component
from miplearn.components.composite import CompositeComponent
from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep

@ -3,17 +3,17 @@
# Released under the modified BSD license. See COPYING.md for more details.
import logging
import random
from copy import deepcopy
import numpy as np
from tqdm import tqdm
import random
from ... import Component
from ...classifiers.counting import CountingClassifier
from ...components import classifier_evaluation_dict
from ...extractors import InstanceIterator
from .drop_redundant import DropRedundantInequalitiesStep
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from miplearn.components.component import Component
from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
from miplearn.extractors import InstanceIterator
logger = logging.getLogger(__name__)

@ -8,9 +8,9 @@ from copy import deepcopy
import numpy as np
from tqdm import tqdm
from miplearn import Component
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from miplearn.components.component import Component
from miplearn.components.lazy_static import LazyConstraint
from miplearn.extractors import InstanceIterator

@ -4,7 +4,7 @@
import logging
from miplearn import Component
from miplearn.components.component import Component
logger = logging.getLogger(__name__)

@ -1,9 +1,12 @@
from miplearn import LearningSolver, GurobiSolver, Instance, Classifier
from unittest.mock import Mock
from miplearn.classifiers import Classifier
from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
from miplearn.instance import Instance
from miplearn.problems.knapsack import GurobiKnapsackInstance
from unittest.mock import Mock
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.learning import LearningSolver
def test_convert_tight_usage():
@ -21,8 +24,8 @@ def test_convert_tight_usage():
)
# Solve original problem
solver.solve(instance)
original_upper_bound = instance.upper_bound
stats = solver.solve(instance)
original_upper_bound = stats["Upper bound"]
# Should collect training data
assert instance.training_data[0]["slacks"]["eq_capacity"] == 0.0
@ -32,15 +35,14 @@ def test_convert_tight_usage():
stats = solver.solve(instance)
# Objective value should be the same
assert instance.upper_bound == original_upper_bound
assert stats["Upper bound"] == original_upper_bound
assert stats["ConvertTight: Inf iterations"] == 0
assert stats["ConvertTight: Subopt iterations"] == 0
class TestInstance(Instance):
class SampleInstance(Instance):
def to_model(self):
import gurobipy as grb
from gurobipy import GRB
m = grb.Model("model")
x1 = m.addVar(name="x1")
@ -68,9 +70,9 @@ def test_convert_tight_infeasibility():
components=[comp],
solve_lp_first=False,
)
instance = TestInstance()
instance = SampleInstance()
stats = solver.solve(instance)
assert instance.lower_bound == 5.0
assert stats["Upper bound"] == 5.0
assert stats["ConvertTight: Inf iterations"] == 1
assert stats["ConvertTight: Subopt iterations"] == 0
@ -91,9 +93,9 @@ def test_convert_tight_suboptimality():
components=[comp],
solve_lp_first=False,
)
instance = TestInstance()
instance = SampleInstance()
stats = solver.solve(instance)
assert instance.lower_bound == 5.0
assert stats["Upper bound"] == 5.0
assert stats["ConvertTight: Inf iterations"] == 0
assert stats["ConvertTight: Subopt iterations"] == 1
@ -114,8 +116,8 @@ def test_convert_tight_optimal():
components=[comp],
solve_lp_first=False,
)
instance = TestInstance()
instance = SampleInstance()
stats = solver.solve(instance)
assert instance.lower_bound == 5.0
assert stats["Upper bound"] == 5.0
assert stats["ConvertTight: Inf iterations"] == 0
assert stats["ConvertTight: Subopt iterations"] == 0

@ -2,21 +2,15 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
from unittest.mock import Mock, call
from miplearn import (
LearningSolver,
Instance,
InternalSolver,
GurobiSolver,
)
import numpy as np
from miplearn.classifiers import Classifier
from miplearn.components.relaxation import (
DropRedundantInequalitiesStep,
RelaxIntegralityStep,
)
from miplearn.problems.knapsack import GurobiKnapsackInstance
from miplearn.components.relaxation import DropRedundantInequalitiesStep
from miplearn.instance import Instance
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver
def _setup():

@ -4,8 +4,10 @@
from unittest.mock import Mock, call
from miplearn import Component, LearningSolver, Instance
from miplearn.components.component import Component
from miplearn.components.composite import CompositeComponent
from miplearn.instance import Instance
from miplearn.solvers.learning import LearningSolver
def test_composite():

@ -5,10 +5,13 @@
from unittest.mock import Mock
import numpy as np
from miplearn import DynamicLazyConstraintsComponent, LearningSolver, InternalSolver
from numpy.linalg import norm
from miplearn.classifiers import Classifier
from miplearn.components.lazy_dynamic import DynamicLazyConstraintsComponent
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver
from miplearn.tests import get_test_pyomo_instances
from numpy.linalg import norm
E = 0.1

@ -4,13 +4,11 @@
from unittest.mock import Mock, call
from miplearn import (
StaticLazyConstraintsComponent,
LearningSolver,
Instance,
InternalSolver,
)
from miplearn.classifiers import Classifier
from miplearn.components.lazy_static import StaticLazyConstraintsComponent
from miplearn.instance import Instance
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver
def test_usage_with_solver():
@ -49,7 +47,9 @@ def test_usage_with_solver():
)
component = StaticLazyConstraintsComponent(
threshold=0.90, use_two_phase_gap=False, violation_tolerance=1.0
threshold=0.90,
use_two_phase_gap=False,
violation_tolerance=1.0,
)
component.classifiers = {
"type-a": Mock(spec=Classifier),

@ -5,8 +5,9 @@
from unittest.mock import Mock
import numpy as np
from miplearn import ObjectiveValueComponent
from miplearn.classifiers import Regressor
from miplearn.components.objective import ObjectiveValueComponent
from miplearn.tests import get_test_pyomo_instances
@ -14,8 +15,8 @@ def test_usage():
instances, models = get_test_pyomo_instances()
comp = ObjectiveValueComponent()
comp.fit(instances)
assert instances[0].lower_bound == 1183.0
assert instances[0].upper_bound == 1183.0
assert instances[0].training_data[0]["Lower bound"] == 1183.0
assert instances[0].training_data[0]["Upper bound"] == 1183.0
assert np.round(comp.predict(instances), 2).tolist() == [
[1183.0, 1183.0],
[1070.0, 1070.0],

@ -5,8 +5,9 @@
from unittest.mock import Mock
import numpy as np
from miplearn import PrimalSolutionComponent
from miplearn.classifiers import Classifier
from miplearn.components.primal import PrimalSolutionComponent
from miplearn.tests import get_test_pyomo_instances
@ -49,7 +50,7 @@ def test_evaluate():
comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one], threshold=0.50)
comp.fit(instances[:1])
assert comp.predict(instances[0]) == {"x": {0: 0, 1: 0, 2: 1, 3: None}}
assert instances[0].solution == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
assert instances[0].training_data[0]["Solution"] == {"x": {0: 1, 1: 0, 2: 1, 3: 1}}
ev = comp.evaluate(instances[:1])
assert ev == {
"Fix one": {

@ -2,14 +2,13 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import gzip
import logging
import pickle
import gzip
from abc import ABC, abstractmethod
import numpy as np
from tqdm.auto import tqdm
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
@ -48,10 +47,10 @@ class Extractor(ABC):
@staticmethod
def split_variables(instance):
assert hasattr(instance, "lp_solution")
result = {}
for var_name in instance.lp_solution:
for index in instance.lp_solution[var_name]:
lp_solution = instance.training_data[0]["LP solution"]
for var_name in lp_solution:
for index in lp_solution[var_name]:
category = instance.get_variable_category(var_name, index)
if category is None:
continue
@ -71,6 +70,7 @@ class VariableFeaturesExtractor(Extractor):
):
instance_features = instance.get_instance_features()
var_split = self.split_variables(instance)
lp_solution = instance.training_data[0]["LP solution"]
for (category, var_index_pairs) in var_split.items():
if category not in result:
result[category] = []
@ -78,7 +78,7 @@ class VariableFeaturesExtractor(Extractor):
result[category] += [
instance_features.tolist()
+ instance.get_variable_features(var_name, index).tolist()
+ [instance.lp_solution[var_name][index]]
+ [lp_solution[var_name][index]]
]
for category in result:
result[category] = np.array(result[category])
@ -97,14 +97,15 @@ class SolutionExtractor(Extractor):
disable=len(instances) < 5,
):
var_split = self.split_variables(instance)
if self.relaxation:
solution = instance.training_data[0]["LP solution"]
else:
solution = instance.training_data[0]["Solution"]
for (category, var_index_pairs) in var_split.items():
if category not in result:
result[category] = []
for (var_name, index) in var_index_pairs:
if self.relaxation:
v = instance.lp_solution[var_name][index]
else:
v = instance.solution[var_name][index]
v = solution[var_name][index]
if v is None:
result[category] += [[0, 0]]
else:
@ -121,7 +122,7 @@ class InstanceFeaturesExtractor(Extractor):
np.hstack(
[
instance.get_instance_features(),
instance.lp_value,
instance.training_data[0]["LP value"],
]
)
for instance in InstanceIterator(instances)
@ -137,13 +138,22 @@ class ObjectiveValueExtractor(Extractor):
def extract(self, instances):
if self.kind == "lower bound":
return np.array(
[[instance.lower_bound] for instance in InstanceIterator(instances)]
[
[instance.training_data[0]["Lower bound"]]
for instance in InstanceIterator(instances)
]
)
if self.kind == "upper bound":
return np.array(
[[instance.upper_bound] for instance in InstanceIterator(instances)]
[
[instance.training_data[0]["Upper bound"]]
for instance in InstanceIterator(instances)
]
)
if self.kind == "lp":
return np.array(
[[instance.lp_value] for instance in InstanceIterator(instances)]
[
[instance.training_data[0]["LP value"]]
for instance in InstanceIterator(instances)
]
)
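The three branches differ only in the sample key they read. A table-driven equivalent, offered here only as a sketch (the commit itself keeps the explicit branches):

    # Editor's sketch only; not part of this commit.
    import numpy as np
    from miplearn.extractors import InstanceIterator

    KIND_TO_KEY = {
        "lower bound": "Lower bound",
        "upper bound": "Upper bound",
        "lp": "LP value",
    }

    def extract(self, instances):
        key = KIND_TO_KEY[self.kind]
        return np.array(
            [
                [instance.training_data[0][key]]
                for instance in InstanceIterator(instances)
            ]
        )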

@ -5,21 +5,28 @@
import gzip
import json
from abc import ABC, abstractmethod
from typing import Any
from typing import Any, List
import numpy as np
from miplearn.types import TrainingSample
class Instance(ABC):
"""
Abstract class holding all the data necessary to generate a concrete model of the problem.
In the knapsack problem, for example, this class could hold the number of items, their weights
and costs, as well as the size of the knapsack. Objects implementing this class are able to
convert themselves into a concrete optimization model, which can be optimized by a solver, or
into arrays of features, which can be provided as inputs to machine learning models.
Abstract class holding all the data necessary to generate a concrete model of the
problem.
In the knapsack problem, for example, this class could hold the number of items,
their weights and costs, as well as the size of the knapsack. Objects
implementing this class are able to convert themselves into a concrete
optimization model, which can be optimized by a solver, or into arrays of
features, which can be provided as inputs to machine learning models.
"""
def __init__(self):
self.training_data: List[TrainingSample] = []
@abstractmethod
def to_model(self) -> Any:
"""
@ -29,21 +36,23 @@ class Instance(ABC):
def get_instance_features(self):
"""
Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.
Returns a 1-dimensional Numpy array of (numerical) features describing the
entire instance.
The array is used by LearningSolver to determine how similar two instances are. It may also
be used to predict, in combination with variable-specific features, the values of binary
decision variables in the problem.
The array is used by LearningSolver to determine how similar two instances
are. It may also be used to predict, in combination with variable-specific
features, the values of binary decision variables in the problem.
There is not necessarily a one-to-one correspondence between models and instance features:
the features may encode only part of the data necessary to generate the complete model.
Features may also be statistics computed from the original data. For example, in the
knapsack problem, an implementation may decide to provide as instance features only
the average weights, average prices, number of items and the size of the knapsack.
There is not necessarily a one-to-one correspondence between models and
instance features: the features may encode only part of the data necessary to
generate the complete model. Features may also be statistics computed from
the original data. For example, in the knapsack problem, an implementation
may decide to provide as instance features only the average weights, average
prices, number of items and the size of the knapsack.
The returned array MUST have the same length for all relevant instances of the problem. If
two instances map into arrays of different lengths, they cannot be solved by the same
LearningSolver object.
The returned array MUST have the same length for all relevant instances of
the problem. If two instances map into arrays of different lengths,
they cannot be solved by the same LearningSolver object.
By default, returns [0].
"""
@ -51,20 +60,22 @@ class Instance(ABC):
def get_variable_features(self, var, index):
"""
Returns a 1-dimensional array of (numerical) features describing a particular decision
variable.
Returns a 1-dimensional array of (numerical) features describing a particular
decision variable.
The argument `var` is a pyomo.core.Var object, which represents a collection of decision
variables. The argument `index` specifies which variable in the collection is the relevant
one.
The argument `var` is a pyomo.core.Var object, which represents a collection
of decision variables. The argument `index` specifies which variable in the
collection is the relevant one.
In combination with instance features, variable features are used by LearningSolver to
predict, among other things, the optimal value of each decision variable before the
optimization takes place. In the knapsack problem, for example, an implementation could
provide as variable features the weight and the price of a specific item.
In combination with instance features, variable features are used by
LearningSolver to predict, among other things, the optimal value of each
decision variable before the optimization takes place. In the knapsack
problem, for example, an implementation could provide as variable features
the weight and the price of a specific item.
Like instance features, the arrays returned by this method MUST have the same length for
all variables within the same category, for all relevant instances of the problem.
Like instance features, the arrays returned by this method MUST have the same
length for all variables within the same category, for all relevant instances
of the problem.
By default, returns [0].
"""
@ -72,12 +83,12 @@ class Instance(ABC):
def get_variable_category(self, var, index):
"""
Returns the category (a string, an integer or any hashable type) for each decision
variable.
Returns the category (a string, an integer or any hashable type) for each
decision variable.
If two variables have the same category, LearningSolver will use the same internal ML
model to predict the values of both variables. If the returned category is None, ML
models will ignore the variable.
If two variables have the same category, LearningSolver will use the same
internal ML model to predict the values of both variables. If the returned
category is None, ML models will ignore the variable.
By default, returns "default".
"""
@ -102,16 +113,16 @@ class Instance(ABC):
"""
Returns lazy constraint violations found for the current solution.
After solving a model, LearningSolver will ask the instance to identify which lazy
constraints are violated by the current solution. For each identified violation,
LearningSolver will then call the build_lazy_constraint, add the generated Pyomo
constraint to the model, then resolve the problem. The process repeats until no further
lazy constraint violations are found.
After solving a model, LearningSolver will ask the instance to identify which
lazy constraints are violated by the current solution. For each identified
violation, LearningSolver will then call build_lazy_constraint, add the
generated Pyomo constraint to the model, then re-solve the problem. The
process repeats until no further lazy constraint violations are found.
Each "violation" is simply a string, a tuple or any other hashable type which allows the
instance to identify unambiguously which lazy constraint should be generated. In the
Traveling Salesman Problem, for example, a subtour violation could be a frozen set
containing the cities in the subtour.
Each "violation" is simply a string, a tuple or any other hashable type which
allows the instance to identify unambiguously which lazy constraint should be
generated. In the Traveling Salesman Problem, for example, a subtour
violation could be a frozen set containing the cities in the subtour.
For a concrete example, see TravelingSalesmanInstance.
"""
@ -121,15 +132,17 @@ class Instance(ABC):
"""
Returns a Pyomo constraint which fixes a given violation.
This method is typically called immediately after find_violated_lazy_constraints. The violation object
provided to this method is exactly the same object returned earlier by find_violated_lazy_constraints.
After some training, LearningSolver may decide to proactively build some lazy constraints
at the beginning of the optimization process, before a solution is even available. In this
case, build_lazy_constraints will be called without a corresponding call to
This method is typically called immediately after
find_violated_lazy_constraints. The violation object provided to this method
is exactly the same object returned earlier by
find_violated_lazy_constraints. After some training, LearningSolver may
decide to proactively build some lazy constraints at the beginning of the
optimization process, before a solution is even available. In this case,
build_lazy_constraints will be called without a corresponding call to
find_violated_lazy_constraints.
The implementation should not directly add the constraint to the model. The constraint
will be added by LearningSolver after the method returns.
The implementation should not directly add the constraint to the model. The
constraint will be added by LearningSolver after the method returns.
For a concrete example, see TravelingSalesmanInstance.
"""

@ -2,10 +2,9 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from datetime import timedelta
import logging
import time
import sys
import time
class TimeFormatter(logging.Formatter):

@ -2,13 +2,13 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import miplearn
from miplearn import Instance
import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint, bernoulli
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
from miplearn.instance import Instance
class ChallengeA:
"""
@ -56,6 +56,7 @@ class MultiKnapsackInstance(Instance):
"""
def __init__(self, prices, capacities, weights):
super().__init__()
assert isinstance(prices, np.ndarray)
assert isinstance(capacities, np.ndarray)
assert isinstance(weights, np.ndarray)
@ -241,6 +242,7 @@ class KnapsackInstance(Instance):
"""
def __init__(self, weights, prices, capacity):
super().__init__()
self.weights = weights
self.prices = prices
self.capacity = capacity

@ -8,7 +8,7 @@ import pyomo.environ as pe
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
from miplearn import Instance
from miplearn.instance import Instance
class ChallengeA:
@ -101,6 +101,7 @@ class MaxWeightStableSetInstance(Instance):
"""
def __init__(self, graph, weights):
super().__init__()
self.graph = graph
self.weights = weights

@ -2,10 +2,10 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.knapsack import MultiKnapsackGenerator, MultiKnapsackInstance
from scipy.stats import uniform, randint
import numpy as np
from scipy.stats import uniform, randint
from miplearn.problems.knapsack import MultiKnapsackGenerator
def test_knapsack_generator():

@ -4,18 +4,19 @@
import networkx as nx
import numpy as np
from miplearn import LearningSolver
from miplearn.problems.stab import MaxWeightStableSetInstance
from scipy.stats import uniform, randint
from miplearn.problems.stab import MaxWeightStableSetInstance
from miplearn.solvers.learning import LearningSolver
def test_stab():
graph = nx.cycle_graph(5)
weights = [1.0, 1.0, 1.0, 1.0, 1.0]
instance = MaxWeightStableSetInstance(graph, weights)
solver = LearningSolver()
solver.solve(instance)
assert instance.lower_bound == 2.0
stats = solver.solve(instance)
assert stats["Lower bound"] == 2.0
def test_stab_generator_fixed_graph():

@ -2,13 +2,14 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance
import numpy as np
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance
from miplearn.solvers.learning import LearningSolver
def test_generator():
instances = TravelingSalesmanGenerator(
@ -37,16 +38,16 @@ def test_instance():
)
instance = TravelingSalesmanInstance(n_cities, distances)
solver = LearningSolver()
solver.solve(instance)
x = instance.solution["x"]
stats = solver.solve(instance)
x = instance.training_data[0]["Solution"]["x"]
assert x[0, 1] == 1.0
assert x[0, 2] == 0.0
assert x[0, 3] == 1.0
assert x[1, 2] == 1.0
assert x[1, 3] == 0.0
assert x[2, 3] == 1.0
assert instance.lower_bound == 4.0
assert instance.upper_bound == 4.0
assert stats["Lower bound"] == 4.0
assert stats["Upper bound"] == 4.0
def test_subtour():
@ -67,7 +68,7 @@ def test_subtour():
solver.solve(instance)
assert hasattr(instance, "found_violated_lazy_constraints")
assert hasattr(instance, "found_violated_user_cuts")
x = instance.solution["x"]
x = instance.training_data[0]["Solution"]["x"]
assert x[0, 1] == 1.0
assert x[0, 4] == 1.0
assert x[1, 2] == 1.0

@ -2,14 +2,14 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import networkx as nx
import numpy as np
import pyomo.environ as pe
from miplearn import Instance
from scipy.stats import uniform, randint
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
import networkx as nx
import random
from miplearn.instance import Instance
class ChallengeA:

@ -8,15 +8,15 @@ from io import StringIO
from random import randint
from typing import List, Any, Dict, Union, Tuple, Optional
from . import RedirectOutput
from .internal import (
from miplearn.instance import Instance
from miplearn.solvers import RedirectOutput
from miplearn.solvers.internal import (
InternalSolver,
LPSolveStats,
IterationCallback,
LazyCallback,
MIPSolveStats,
)
from .. import Instance
logger = logging.getLogger(__name__)
@ -181,6 +181,7 @@ class GurobiSolver(InternalSolver):
sense = "max"
lb = self.model.objVal
ub = self.model.objBound
ws_value = self._extract_warm_start_value(log)
stats: MIPSolveStats = {
"Lower bound": lb,
"Upper bound": ub,
@ -188,10 +189,9 @@ class GurobiSolver(InternalSolver):
"Nodes": total_nodes,
"Sense": sense,
"Log": log,
"Warm start value": ws_value,
"LP value": None,
}
ws_value = self._extract_warm_start_value(log)
if ws_value is not None:
stats["Warm start value"] = ws_value
return stats
def get_solution(self) -> Dict:
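Since "Warm start value" and "LP value" are now always present in the stats dict (set to None when unavailable), consumers switch from key-membership tests to None tests, as test_internal_solver_warm_starts does further down. A minimal consumer sketch:

    ws = stats["Warm start value"]       # always present, possibly None
    if ws is not None:
        print("warm start value:", ws)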

@ -4,11 +4,15 @@
import logging
from abc import ABC, abstractmethod
from typing import Callable, Any, Dict, List
from typing_extensions import TypedDict
from ..instance import Instance
from typing import Any, Dict, List
from miplearn.instance import Instance
from miplearn.types import (
LPSolveStats,
IterationCallback,
LazyCallback,
MIPSolveStats,
)
logger = logging.getLogger(__name__)
@ -21,33 +25,6 @@ class Constraint:
pass
LPSolveStats = TypedDict(
"LPSolveStats",
{
"Optimal value": float,
"Log": str,
},
)
MIPSolveStats = TypedDict(
"MIPSolveStats",
{
"Lower bound": float,
"Upper bound": float,
"Wallclock time": float,
"Nodes": float,
"Sense": str,
"Log": str,
"Warm start value": float,
},
total=False,
)
IterationCallback = Callable[[], bool]
LazyCallback = Callable[[Any, Any], None]
class InternalSolver(ABC):
"""
Abstract class representing the MIP solver used internally by LearningSolver.

@ -2,26 +2,24 @@
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import gzip
import logging
import pickle
import os
import pickle
import tempfile
import gzip
from copy import deepcopy
from typing import Optional, List
from typing import Optional, List, Any, IO, cast, BinaryIO, Union
from p_tqdm import p_map
from tempfile import NamedTemporaryFile
from . import RedirectOutput
from .. import (
ObjectiveValueComponent,
PrimalSolutionComponent,
DynamicLazyConstraintsComponent,
UserCutsComponent,
)
from ..solvers.internal import InternalSolver
from ..solvers.pyomo.gurobi import GurobiPyomoSolver
from miplearn.components.cuts import UserCutsComponent
from miplearn.components.lazy_dynamic import DynamicLazyConstraintsComponent
from miplearn.components.objective import ObjectiveValueComponent
from miplearn.components.primal import PrimalSolutionComponent
from miplearn.instance import Instance
from miplearn.solvers import RedirectOutput
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
from miplearn.types import MIPSolveStats, TrainingSample
logger = logging.getLogger(__name__)
@ -117,11 +115,11 @@ class LearningSolver:
def solve(
self,
instance,
model=None,
output="",
tee=False,
):
instance: Union[Instance, str],
model: Any = None,
output: str = "",
tee: bool = False,
) -> MIPSolveStats:
"""
Solves the given instance. If trained machine-learning models are
available, they will be used to accelerate the solution process.
@ -129,20 +127,9 @@ class LearningSolver:
The argument `instance` may be either an Instance object or a
filename pointing to a pickled Instance object.
This method modifies the instance object. Specifically, the following
properties are set:
- instance.lp_solution
- instance.lp_value
- instance.lower_bound
- instance.upper_bound
- instance.solution
- instance.solver_log
Additional solver components may set additional properties. Please
see their documentation for more details. If a filename is provided,
then the file is modified in-place. That is, the original file is
overwritten.
This method adds a new training sample to `instance.training_data`.
If a filename is provided, then the file is modified in-place. That is,
the original file is overwritten.
If `solver.solve_lp_first` is False, the "LP solution" and "LP value"
entries of the training sample are set to dummy values.
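A short sketch of the filename workflow described above (the paths are made up; `instance` and `solver` as elsewhere in this diff):

    import gzip
    import pickle

    with gzip.GzipFile("instances/0001.pkl.gz", "wb") as f:
        pickle.dump(instance, f)

    stats = solver.solve("instances/0001.pkl.gz")   # file rewritten in place
    stats = solver.solve(
        "instances/0001.pkl.gz",
        output="instances/0001.out.pkl.gz",         # or written to a new file
    )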
@ -192,46 +179,62 @@ class LearningSolver:
def _solve(
self,
instance,
model=None,
output="",
tee=False,
):
instance: Union[Instance, str],
model: Any = None,
output: str = "",
tee: bool = False,
) -> MIPSolveStats:
# Load instance from file, if necessary
filename = None
fileformat = None
file: Union[BinaryIO, gzip.GzipFile]
if isinstance(instance, str):
filename = instance
logger.info("Reading: %s" % filename)
if filename.endswith(".gz"):
fileformat = "pickle-gz"
with gzip.GzipFile(filename, "rb") as file:
instance = pickle.load(file)
instance = pickle.load(cast(IO[bytes], file))
else:
fileformat = "pickle"
with open(filename, "rb") as file:
instance = pickle.load(file)
instance = pickle.load(cast(IO[bytes], file))
assert isinstance(instance, Instance)
# Generate model
if model is None:
with RedirectOutput([]):
model = instance.to_model()
# Initialize training sample
training_sample: TrainingSample = {}
if not hasattr(instance, "training_data"):
instance.training_data = []
instance.training_data += [training_sample]
# Initialize internal solver
self.tee = tee
self.internal_solver = self.solver_factory()
self.internal_solver.set_instance(instance, model)
# Solve linear relaxation
if self.solve_lp_first:
logger.info("Solving LP relaxation...")
results = self.internal_solver.solve_lp(tee=tee)
instance.lp_solution = self.internal_solver.get_solution()
instance.lp_value = results["Optimal value"]
stats = self.internal_solver.solve_lp(tee=tee)
training_sample["LP solution"] = self.internal_solver.get_solution()
training_sample["LP value"] = stats["Optimal value"]
training_sample["LP log"] = stats["Log"]
else:
instance.lp_solution = self.internal_solver.get_empty_solution()
instance.lp_value = 0.0
training_sample["LP solution"] = self.internal_solver.get_empty_solution()
training_sample["LP value"] = 0
# Before-solve callbacks
logger.debug("Running before_solve callbacks...")
for component in self.components.values():
component.before_solve(self, instance, model)
# Define wrappers
def iteration_cb():
should_repeat = False
for comp in self.components.values():
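Taken together, a caller can reach the sample appended by this method through `instance.training_data` (a hedged sketch; `instance` is any Instance subclass defined elsewhere):

    from miplearn.solvers.learning import LearningSolver

    solver = LearningSolver()
    stats = solver.solve(instance)
    sample = instance.training_data[-1]   # the sample appended by this call
    assert sample["Lower bound"] == stats["Lower bound"]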
@ -247,29 +250,28 @@ class LearningSolver:
if self.use_lazy_cb:
lazy_cb = lazy_cb_wrapper
# Solve MILP
logger.info("Solving MILP...")
stats = self.internal_solver.solve(
tee=tee,
iteration_cb=iteration_cb,
lazy_cb=lazy_cb,
)
stats["LP value"] = instance.lp_value
if "LP value" in training_sample.keys():
stats["LP value"] = training_sample["LP value"]
# Read MIP solution and bounds
instance.lower_bound = stats["Lower bound"]
instance.upper_bound = stats["Upper bound"]
instance.solver_log = stats["Log"]
instance.solution = self.internal_solver.get_solution()
training_sample["Lower bound"] = stats["Lower bound"]
training_sample["Upper bound"] = stats["Upper bound"]
training_sample["MIP log"] = stats["Log"]
training_sample["Solution"] = self.internal_solver.get_solution()
# After-solve callbacks
logger.debug("Calling after_solve callbacks...")
training_data = {}
for component in self.components.values():
component.after_solve(self, instance, model, stats, training_data)
if not hasattr(instance, "training_data"):
instance.training_data = []
instance.training_data += [training_data]
component.after_solve(self, instance, model, stats, training_sample)
# Write to file, if necessary
if filename is not None and output is not None:
output_filename = output
if len(output) == 0:
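With the callback signature unchanged but now receiving the shared `training_sample` dict, a custom component can read (or extend) the sample directly. A hypothetical component sketch:

    import logging

    from miplearn.components.component import Component

    logger = logging.getLogger(__name__)

    class BoundLoggingComponent(Component):   # hypothetical component
        def after_solve(self, solver, instance, model, stats, training_sample):
            # training_sample is the same dict just stored on the instance.
            logger.info(
                "bounds: %s / %s",
                training_sample.get("Lower bound"),
                training_sample.get("Upper bound"),
            )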
@ -277,11 +279,10 @@ class LearningSolver:
logger.info("Writing: %s" % output_filename)
if fileformat == "pickle":
with open(output_filename, "wb") as file:
pickle.dump(instance, file)
pickle.dump(instance, cast(IO[bytes], file))
else:
with gzip.GzipFile(output_filename, "wb") as file:
pickle.dump(instance, file)
pickle.dump(instance, cast(IO[bytes], file))
return stats
def parallel_solve(
@ -340,7 +341,7 @@ class LearningSolver:
self._restore_miplearn_logger()
return stats
def fit(self, training_instances):
def fit(self, training_instances: Union[List[str], List[Instance]]) -> None:
if len(training_instances) == 0:
return
for component in self.components.values():

@ -12,15 +12,15 @@ import pyomo
from pyomo import environ as pe
from pyomo.core import Var, Constraint
from .. import RedirectOutput
from ..internal import (
from miplearn.instance import Instance
from miplearn.solvers import RedirectOutput
from miplearn.solvers.internal import (
InternalSolver,
LPSolveStats,
IterationCallback,
LazyCallback,
MIPSolveStats,
)
from ...instance import Instance
logger = logging.getLogger(__name__)
@ -98,19 +98,18 @@ class BasePyomoSolver(InternalSolver):
if not should_repeat:
break
log = streams[0].getvalue()
node_count = self._extract_node_count(log)
ws_value = self._extract_warm_start_value(log)
stats: MIPSolveStats = {
"Lower bound": results["Problem"][0]["Lower bound"],
"Upper bound": results["Problem"][0]["Upper bound"],
"Wallclock time": total_wallclock_time,
"Sense": self._obj_sense,
"Log": log,
"Nodes": node_count,
"Warm start value": ws_value,
"LP value": None,
}
node_count = self._extract_node_count(log)
ws_value = self._extract_warm_start_value(log)
if node_count is not None:
stats["Nodes"] = node_count
if ws_value is not None:
stats["Warm start value"] = ws_value
return stats
def get_solution(self) -> Dict:

@ -5,7 +5,7 @@
from pyomo import environ as pe
from scipy.stats import randint
from .base import BasePyomoSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
class CplexPyomoSolver(BasePyomoSolver):

@ -7,7 +7,7 @@ import logging
from pyomo import environ as pe
from scipy.stats import randint
from .base import BasePyomoSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
logger = logging.getLogger(__name__)

@ -7,7 +7,7 @@ import logging
from pyomo import environ as pe
from scipy.stats import randint
from .base import BasePyomoSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
logger = logging.getLogger(__name__)

@ -5,15 +5,18 @@
from inspect import isclass
from typing import List, Callable
from miplearn import BasePyomoSolver, GurobiSolver, GurobiPyomoSolver, InternalSolver
from miplearn.problems.knapsack import KnapsackInstance, GurobiKnapsackInstance
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
from miplearn.solvers.pyomo.xpress import XpressPyomoSolver
def _get_instance(solver):
def _is_subclass_or_instance(solver, parentClass):
return isinstance(solver, parentClass) or (
isclass(solver) and issubclass(solver, parentClass)
def _is_subclass_or_instance(obj, parent_class):
return isinstance(obj, parent_class) or (
isclass(obj) and issubclass(obj, parent_class)
)
if _is_subclass_or_instance(solver, BasePyomoSolver):

@ -8,9 +8,10 @@ from warnings import warn
import pyomo.environ as pe
from miplearn import BasePyomoSolver, GurobiSolver
from miplearn.solvers import RedirectOutput
from . import _get_instance, _get_internal_solvers
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.solvers.tests import _get_instance, _get_internal_solvers
logger = logging.getLogger(__name__)
@ -44,7 +45,7 @@ def test_internal_solver_warm_starts():
}
)
stats = solver.solve(tee=True)
if "Warm start value" in stats:
if stats["Warm start value"] is not None:
assert stats["Warm start value"] == 725.0
else:
warn(f"{solver_class.__name__} should set warm start value")
@ -60,7 +61,7 @@ def test_internal_solver_warm_starts():
}
)
stats = solver.solve(tee=True)
assert "Warm start value" not in stats
assert stats["Warm start value"] is None
solver.fix(
{

@ -4,8 +4,8 @@
import logging
from . import _get_instance
from ... import GurobiSolver
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.tests import _get_instance
logger = logging.getLogger(__name__)

@ -7,13 +7,9 @@ import pickle
import tempfile
import os
from miplearn import (
LearningSolver,
GurobiSolver,
DynamicLazyConstraintsComponent,
)
from . import _get_instance, _get_internal_solvers
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.learning import LearningSolver
from miplearn.solvers.tests import _get_instance, _get_internal_solvers
logger = logging.getLogger(__name__)
@ -29,20 +25,19 @@ def test_learning_solver():
)
solver.solve(instance)
assert instance.solution["x"][0] == 1.0
assert instance.solution["x"][1] == 0.0
assert instance.solution["x"][2] == 1.0
assert instance.solution["x"][3] == 1.0
assert instance.lower_bound == 1183.0
assert instance.upper_bound == 1183.0
assert round(instance.lp_solution["x"][0], 3) == 1.000
assert round(instance.lp_solution["x"][1], 3) == 0.923
assert round(instance.lp_solution["x"][2], 3) == 1.000
assert round(instance.lp_solution["x"][3], 3) == 0.000
assert round(instance.lp_value, 3) == 1287.923
assert instance.found_violated_lazy_constraints == []
assert instance.found_violated_user_cuts == []
assert len(instance.solver_log) > 100
data = instance.training_data[0]
assert data["Solution"]["x"][0] == 1.0
assert data["Solution"]["x"][1] == 0.0
assert data["Solution"]["x"][2] == 1.0
assert data["Solution"]["x"][3] == 1.0
assert data["Lower bound"] == 1183.0
assert data["Upper bound"] == 1183.0
assert round(data["LP solution"]["x"][0], 3) == 1.000
assert round(data["LP solution"]["x"][1], 3) == 0.923
assert round(data["LP solution"]["x"][2], 3) == 1.000
assert round(data["LP solution"]["x"][3], 3) == 0.000
assert round(data["LP value"], 3) == 1287.923
assert len(data["MIP log"]) > 100
solver.fit([instance])
solver.solve(instance)
@ -52,6 +47,19 @@ def test_learning_solver():
pickle.dump(solver, file)
def test_solve_without_lp():
for internal_solver in _get_internal_solvers():
logger.info("Solver: %s" % internal_solver)
instance = _get_instance(internal_solver)
solver = LearningSolver(
solver=internal_solver,
solve_lp_first=False,
)
solver.solve(instance)
solver.fit([instance])
solver.solve(instance)
def test_parallel_solve():
for internal_solver in _get_internal_solvers():
instances = [_get_instance(internal_solver) for _ in range(10)]
@ -59,7 +67,8 @@ def test_parallel_solve():
results = solver.parallel_solve(instances, n_jobs=3)
assert len(results) == 10
for instance in instances:
assert len(instance.solution["x"].keys()) == 4
data = instance.training_data[0]
assert len(data["Solution"]["x"].keys()) == 4
def test_solve_fit_from_disk():
@ -77,14 +86,14 @@ def test_solve_fit_from_disk():
solver.solve(filenames[0])
with open(filenames[0], "rb") as file:
instance = pickle.load(file)
assert hasattr(instance, "solution")
assert len(instance.training_data) > 0
# Test: parallel_solve
solver.parallel_solve(filenames)
for filename in filenames:
with open(filename, "rb") as file:
instance = pickle.load(file)
assert hasattr(instance, "solution")
assert len(instance.training_data) > 0
# Test: solve (with specified output)
output = [f + ".out" for f in filenames]

@ -1,8 +1,9 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance
from miplearn.solvers.learning import LearningSolver
def get_test_pyomo_instances():

@ -4,10 +4,12 @@
import os.path
from miplearn import LearningSolver, BenchmarkRunner
from miplearn.benchmark import BenchmarkRunner
from miplearn.problems.stab import MaxWeightStableSetGenerator
from scipy.stats import randint
from miplearn.solvers.learning import LearningSolver
def test_benchmark():
# Generate training and test instances

@ -1,16 +1,15 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
from miplearn.problems.knapsack import KnapsackInstance
from miplearn import (
LearningSolver,
from miplearn.extractors import (
SolutionExtractor,
InstanceFeaturesExtractor,
VariableFeaturesExtractor,
)
import numpy as np
import pyomo.environ as pe
from miplearn.problems.knapsack import KnapsackInstance
from miplearn.solvers.learning import LearningSolver
def _get_instances():

@ -0,0 +1,46 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import TypedDict, Optional, Dict, Callable, Any
TrainingSample = TypedDict(
"TrainingSample",
{
"LP log": str,
"LP solution": Dict,
"LP value": float,
"Lower bound": float,
"MIP log": str,
"Solution": Dict,
"Upper bound": float,
"slacks": Dict,
},
total=False,
)
LPSolveStats = TypedDict(
"LPSolveStats",
{
"Optimal value": float,
"Log": str,
},
)
MIPSolveStats = TypedDict(
"MIPSolveStats",
{
"Lower bound": Optional[float],
"Upper bound": Optional[float],
"Wallclock time": float,
"Nodes": Optional[int],
"Sense": str,
"Log": str,
"Warm start value": Optional[float],
"LP value": Optional[float],
},
)
IterationCallback = Callable[[], bool]
LazyCallback = Callable[[Any, Any], None]
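A short usage sketch: `TrainingSample` is declared with `total=False`, so it may start empty and be filled key by key, while `MIPSolveStats` is total, so every key must be present and `Optional[...]` values use `None` for "not available". (Note that `TypedDict` lives in `typing` only on Python 3.8+; older interpreters need `typing_extensions`.)

    from miplearn.types import TrainingSample

    sample: TrainingSample = {}            # valid because total=False
    sample["Lower bound"] = 1183.0
    sample["Solution"] = {"x": {0: 1.0}}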