Remove obsolete methods

master
Alinson S. Xavier 5 years ago
parent c26b852c67
commit c4a6665825

@@ -13,12 +13,6 @@ from .components.dynamic_user_cuts import UserCutsComponent
from .components.objective import ObjectiveValueComponent
from .components.primal import PrimalSolutionComponent
from .components.static_lazy import StaticLazyConstraintsComponent
from .features import (
Features,
TrainingSample,
Variable,
InstanceFeatures,
)
from .instance.base import Instance
from .instance.picklegz import (
PickleGzInstance,

@@ -7,7 +7,7 @@ from typing import Any, List, TYPE_CHECKING, Tuple, Dict, Hashable, Optional
import numpy as np
from overrides import EnforceOverrides
from miplearn.features import TrainingSample, Features, Sample
from miplearn.features import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats
@@ -39,21 +39,6 @@ class Component(EnforceOverrides):
"""
return
def after_solve_lp_old(
self,
solver: "LearningSolver",
instance: Instance,
model: Any,
stats: LearningSolveStats,
features: Features,
training_data: TrainingSample,
) -> None:
"""
Method called by LearningSolver after the root LP relaxation is solved.
See before_solve_lp for a description of the parameters.
"""
return
def after_solve_mip(
self,
solver: "LearningSolver",
@@ -68,21 +53,6 @@ class Component(EnforceOverrides):
"""
return
def after_solve_mip_old(
self,
solver: "LearningSolver",
instance: Instance,
model: Any,
stats: LearningSolveStats,
features: Features,
training_data: TrainingSample,
) -> None:
"""
Method called by LearningSolver after the MIP is solved.
See before_solve_lp for a description of the parameters.
"""
return
def before_solve_lp(
self,
solver: "LearningSolver",
@@ -115,43 +85,6 @@ class Component(EnforceOverrides):
"""
return
def before_solve_lp_old(
self,
solver: "LearningSolver",
instance: Instance,
model: Any,
stats: LearningSolveStats,
features: Features,
training_data: TrainingSample,
) -> None:
"""
Method called by LearningSolver before the root LP relaxation is solved.
Parameters
----------
solver: LearningSolver
The solver calling this method.
instance: Instance
The instance being solved.
model
The concrete optimization model being solved.
stats: LearningSolveStats
A dictionary containing statistics about the solution process, such as
number of nodes explored and running time. Components are free to add
their own statistics here. For example, PrimalSolutionComponent adds
statistics regarding the number of predicted variables. All statistics in
this dictionary are exported to the benchmark CSV file.
features: miplearn.features.Features
Features describing the model.
training_data: TrainingSample
A dictionary containing data that may be useful for training machine
learning models and accelerating the solution process. Components are
free to add their own training data here. For example,
PrimalSolutionComponent adds the current primal solution. The data must
be picklable.
"""
return
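For orientation, a minimal sketch of a component written against the retained callbacks. The argument list is an assumption inferred from the callback_args tuple built in learning.py further down (a single Sample replaces the Features/TrainingSample pair); it is not confirmed by this diff.
from overrides import overrides
from miplearn.components.component import Component
class CallCounter(Component):
    # Hypothetical component: counts invocations in the shared stats dict.
    # Signature assumed to mirror the removed one, with `sample` last.
    @overrides
    def before_solve_lp(self, solver, instance, model, stats, sample) -> None:
        stats["CallCounter: calls"] = stats.get("CallCounter: calls", 0) + 1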
def before_solve_mip(
self,
solver: "LearningSolver",
@@ -166,30 +99,6 @@ class Component(EnforceOverrides):
"""
return
def before_solve_mip_old(
self,
solver: "LearningSolver",
instance: Instance,
model: Any,
stats: LearningSolveStats,
features: Features,
training_data: TrainingSample,
) -> None:
"""
Method called by LearningSolver before the MIP is solved.
See before_solve_lp for a description of the parameters.
"""
return
def evaluate_old(self, instances: List[Instance]) -> List:
ev = []
for instance in instances:
instance.load()
for sample in instance.training_data:
ev += [self.sample_evaluate_old(instance, sample)]
instance.free()
return ev
def fit(
self,
training_instances: List[Instance],
@@ -200,16 +109,6 @@ class Component(EnforceOverrides):
y[cat] = np.array(y[cat])
self.fit_xy(x, y)
def fit_old(
self,
training_instances: List[Instance],
) -> None:
x, y = self.xy_instances_old(training_instances)
for cat in x.keys():
x[cat] = np.array(x[cat])
y[cat] = np.array(y[cat])
self.fit_xy(x, y)
def fit_xy(
self,
x: Dict[Hashable, np.ndarray],
@@ -259,13 +158,6 @@ class Component(EnforceOverrides):
) -> None:
return
def sample_evaluate_old(
self,
instance: Instance,
sample: TrainingSample,
) -> Dict[Hashable, Dict[str, float]]:
return {}
def sample_evaluate(
self,
instance: Optional[Instance],
@@ -285,18 +177,6 @@ class Component(EnforceOverrides):
"""
pass
def sample_xy_old(
self,
instance: Instance,
sample: TrainingSample,
) -> Tuple[Dict, Dict]:
"""
Returns a pair of x and y dictionaries containing, respectively, the matrices
of ML features and the labels for the sample. If the training sample does not
include label information, returns (x, {}).
"""
pass
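As an illustration of this contract, a hedged example of the shapes involved (category name and values invented):
# Hypothetical (x, y) pair for one labeled sample:
x = {
    "type-a": [
        [50.0, 1.0, 2.0, 3.0],  # one feature row per entity in the category
        [50.0, 4.0, 5.0, 6.0],
    ],
}
y = {
    "type-a": [
        [False, True],  # one-hot label row, aligned with the x row above
        [True, False],
    ],
}
# For a sample with no label information the method returns (x, {}).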
def user_cut_cb(
self,
solver: "LearningSolver",
@@ -323,25 +203,3 @@ class Component(EnforceOverrides):
y_combined[cat] += y_sample[cat]
instance.free()
return x_combined, y_combined
def xy_instances_old(
self,
instances: List[Instance],
) -> Tuple[Dict, Dict]:
x_combined: Dict = {}
y_combined: Dict = {}
for instance in instances:
instance.load()
for sample in instance.training_data:
xy = self.sample_xy_old(instance, sample)
if xy is None:
continue
x_sample, y_sample = xy
for cat in x_sample.keys():
if cat not in x_combined:
x_combined[cat] = []
y_combined[cat] = []
x_combined[cat] += x_sample[cat]
y_combined[cat] += y_sample[cat]
instance.free()
return x_combined, y_combined
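The surviving xy_instances applies the same aggregation: per-category rows from every sample of every instance are concatenated, preserving x/y alignment. A tiny worked example (values invented):
# sample 1 contributes x = {"type-a": [[1, 2]]}
# sample 2 contributes x = {"type-a": [[3, 4]]}
# after aggregation: x_combined == {"type-a": [[1, 2], [3, 4]]}
# the matching y rows are concatenated in the same order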

@@ -12,7 +12,7 @@ from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import Threshold
from miplearn.components import classifier_evaluation_dict
from miplearn.components.component import Component
from miplearn.features import TrainingSample, Sample
from miplearn.features import Sample
from miplearn.instance.base import Instance
logger = logging.getLogger(__name__)
@@ -37,44 +37,6 @@ class DynamicConstraintsComponent(Component):
self.known_cids: List[str] = []
self.attr = attr
def sample_xy_with_cids_old(
self,
instance: Instance,
sample: TrainingSample,
) -> Tuple[
Dict[Hashable, List[List[float]]],
Dict[Hashable, List[List[bool]]],
Dict[Hashable, List[str]],
]:
x: Dict[Hashable, List[List[float]]] = {}
y: Dict[Hashable, List[List[bool]]] = {}
cids: Dict[Hashable, List[str]] = {}
for cid in self.known_cids:
category = instance.get_constraint_category(cid)
if category is None:
continue
if category not in x:
x[category] = []
y[category] = []
cids[category] = []
assert instance.features.instance is not None
assert instance.features.instance.user_features is not None
cfeatures = instance.get_constraint_features(cid)
assert cfeatures is not None
assert isinstance(cfeatures, list)
for ci in cfeatures:
assert isinstance(ci, float)
f = list(instance.features.instance.user_features)
f += cfeatures
x[category] += [f]
cids[category] += [cid]
if getattr(sample, self.attr) is not None:
if cid in getattr(sample, self.attr):
y[category] += [[False, True]]
else:
y[category] += [[True, False]]
return x, y, cids
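Spelled out, the construction above concatenates instance-level and constraint-level features and derives one-hot labels from the enforced set; a worked example in the style of the test fixtures below (values invented):
# instance user_features = [50.0]; get_constraint_features("c1") = [1.0, 2.0, 3.0]
# get_constraint_category("c1") = "type-a"
# => x["type-a"] gains the row [50.0, 1.0, 2.0, 3.0]
# => y["type-a"] gains [False, True] if "c1" is in the sample's enforced set,
#    and [True, False] otherwise; cids["type-a"] records "c1" for later lookup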
def sample_xy_with_cids(
self,
instance: Optional[Instance],
@@ -122,15 +84,6 @@ class DynamicConstraintsComponent(Component):
y[category] += [[True, False]]
return x, y, cids
@overrides
def sample_xy_old(
self,
instance: Instance,
sample: TrainingSample,
) -> Tuple[Dict, Dict]:
x, y, _ = self.sample_xy_with_cids_old(instance, sample)
return x, y
@overrides
def sample_xy(
self,
@@ -140,29 +93,6 @@ class DynamicConstraintsComponent(Component):
x, y, _ = self.sample_xy_with_cids(instance, sample)
return x, y
def sample_predict_old(
self,
instance: Instance,
sample: TrainingSample,
) -> List[Hashable]:
pred: List[Hashable] = []
if len(self.known_cids) == 0:
logger.info("Classifiers not fitted. Skipping.")
return pred
x, _, cids = self.sample_xy_with_cids_old(instance, sample)
for category in x.keys():
assert category in self.classifiers
assert category in self.thresholds
clf = self.classifiers[category]
thr = self.thresholds[category]
nx = np.array(x[category])
proba = clf.predict_proba(nx)
t = thr.predict(nx)
for i in range(proba.shape[0]):
if proba[i][1] > t[1]:
pred += [cids[category][i]]
return pred
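The thresholded prediction step, isolated as a standalone sketch (clf and thr stand in for the fitted Classifier and Threshold objects, which expose the predict_proba/predict contract used above):
import numpy as np
def select_cids(clf, thr, x_rows, cids):
    nx = np.array(x_rows)
    proba = clf.predict_proba(nx)  # shape (n_rows, 2)
    t = thr.predict(nx)            # per-class thresholds, e.g. [t0, t1]
    # keep a constraint when the predicted probability of class 1
    # ("will be enforced") clears the learned class-1 threshold
    return [cids[i] for i in range(proba.shape[0]) if proba[i][1] > t[1]]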
def sample_predict(
self,
instance: Instance,
@@ -186,20 +116,6 @@ class DynamicConstraintsComponent(Component):
pred += [cids[category][i]]
return pred
@overrides
def fit_old(self, training_instances: List[Instance]) -> None:
collected_cids = set()
for instance in training_instances:
instance.load()
for sample in instance.training_data:
if getattr(sample, self.attr) is None:
continue
collected_cids |= getattr(sample, self.attr)
instance.free()
self.known_cids.clear()
self.known_cids.extend(sorted(collected_cids))
super().fit_old(training_instances)
@overrides
def fit(self, training_instances: List[Instance]) -> None:
collected_cids = set()

@@ -8,13 +8,13 @@ from typing import Dict, List, TYPE_CHECKING, Hashable, Tuple, Any, Optional, Se
import numpy as np
from overrides import overrides
from miplearn.instance.base import Instance
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.threshold import MinProbabilityThreshold, Threshold
from miplearn.components.component import Component
from miplearn.components.dynamic_common import DynamicConstraintsComponent
from miplearn.features import TrainingSample, Features, Sample
from miplearn.features import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats
logger = logging.getLogger(__name__)

@@ -3,18 +3,18 @@
# Released under the modified BSD license. See COPYING.md for more details.
import logging
from typing import Any, TYPE_CHECKING, Hashable, Set, Tuple, Dict, List, Optional
from typing import Any, TYPE_CHECKING, Hashable, Set, Tuple, Dict, List
import numpy as np
from overrides import overrides
from miplearn.instance.base import Instance
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.threshold import Threshold, MinProbabilityThreshold
from miplearn.components.component import Component
from miplearn.components.dynamic_common import DynamicConstraintsComponent
from miplearn.features import Features, TrainingSample, Sample
from miplearn.features import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats
logger = logging.getLogger(__name__)

@@ -12,7 +12,7 @@ from sklearn.linear_model import LinearRegression
from miplearn.classifiers import Regressor
from miplearn.classifiers.sklearn import ScikitLearnRegressor
from miplearn.components.component import Component
from miplearn.features import TrainingSample, Features, Sample
from miplearn.features import Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats

@@ -21,7 +21,7 @@ from miplearn.classifiers.adaptive import AdaptiveClassifier
from miplearn.classifiers.threshold import MinPrecisionThreshold, Threshold
from miplearn.components import classifier_evaluation_dict
from miplearn.components.component import Component
from miplearn.features import TrainingSample, Features, Sample
from miplearn.features import Sample
from miplearn.instance.base import Instance
from miplearn.types import (
LearningSolveStats,

@@ -8,12 +8,12 @@ from typing import Dict, Tuple, List, Hashable, Any, TYPE_CHECKING, Set, Optiona
import numpy as np
from overrides import overrides
from miplearn.instance.base import Instance
from miplearn.classifiers import Classifier
from miplearn.classifiers.counting import CountingClassifier
from miplearn.classifiers.threshold import MinProbabilityThreshold, Threshold
from miplearn.components.component import Component
from miplearn.features import TrainingSample, Features, Constraint, Sample
from miplearn.features import Constraint, Sample
from miplearn.instance.base import Instance
from miplearn.types import LearningSolveStats
logger = logging.getLogger(__name__)

@@ -6,31 +6,17 @@ import collections
import numbers
from dataclasses import dataclass
from math import log, isfinite
from typing import TYPE_CHECKING, Dict, Optional, Set, List, Hashable
from typing import TYPE_CHECKING, Dict, Optional, List, Hashable
import numpy as np
from miplearn.types import Solution, Category
from miplearn.types import Category
if TYPE_CHECKING:
from miplearn.solvers.internal import InternalSolver, LPSolveStats, MIPSolveStats
from miplearn.instance.base import Instance
@dataclass
class TrainingSample:
lp_log: Optional[str] = None
lp_solution: Optional[Solution] = None
lp_value: Optional[float] = None
lazy_enforced: Optional[Set[Hashable]] = None
lower_bound: Optional[float] = None
mip_log: Optional[str] = None
solution: Optional[Solution] = None
upper_bound: Optional[float] = None
slacks: Optional[Dict[str, float]] = None
user_cuts_enforced: Optional[Set[Hashable]] = None
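Elsewhere in this commit, the replacement for these fields is the Sample object, whose after_load/after_lp/after_mip snapshots carry the same information; a hedged sketch of the new access pattern, with attribute names taken from the tests below:
# Old: instance.training_data[0].lower_bound
# New, as exercised by the updated tests:
sample = instance.samples[0]
assert sample.after_mip is not None
lb = sample.after_mip.mip_solve.mip_lower_bound
x0 = sample.after_mip.variables["x[0]"].value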
@dataclass
class InstanceFeatures:
user_features: Optional[List[float]] = None

@@ -8,7 +8,7 @@ from typing import Any, List, Optional, Hashable, TYPE_CHECKING
from overrides import EnforceOverrides
from miplearn.features import TrainingSample, Features, Sample
from miplearn.features import Sample
from miplearn.types import VariableName, Category
logger = logging.getLogger(__name__)
@@ -31,8 +31,6 @@ class Instance(ABC, EnforceOverrides):
"""
def __init__(self) -> None:
self.training_data: List[TrainingSample] = []
self.features: Features = Features()
self.samples: List[Sample] = []
@abstractmethod

@@ -121,15 +121,11 @@ class PickleGzInstance(Instance):
obj = read_pickle_gz(self.filename)
assert isinstance(obj, Instance)
self.instance = obj
self.features = self.instance.features
self.training_data = self.instance.training_data
self.samples = self.instance.samples
@overrides
def free(self) -> None:
self.instance = None # type: ignore
self.features = None # type: ignore
self.training_data = None # type: ignore
gc.collect()
@overrides

@@ -11,7 +11,8 @@ from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint
from scipy.stats.distributions import rv_frozen
from miplearn import InternalSolver, BasePyomoSolver
from miplearn.solvers.learning import InternalSolver
from miplearn.solvers.pyomo.base import BasePyomoSolver
from miplearn.instance.base import Instance
from miplearn.types import VariableName, Category

@@ -136,15 +136,6 @@ class GurobiSolver(InternalSolver):
var.lb = value
var.ub = value
@overrides
def get_dual(self, cid: str) -> float:
assert self.model is not None
c = self.model.getConstrByName(cid)
if self.is_infeasible():
return c.farkasDual
else:
return c.pi
@overrides
def get_constraint_attrs(self) -> List[str]:
return [
@@ -175,14 +166,6 @@ class GurobiSolver(InternalSolver):
constraints[c.constrName] = constr
return constraints
@overrides
def get_sense(self) -> str:
assert self.model is not None
if self.model.modelSense == 1:
return "min"
else:
return "max"
@overrides
def get_solution(self) -> Optional[Solution]:
assert self.model is not None
@@ -224,12 +207,6 @@ class GurobiSolver(InternalSolver):
"value",
]
@overrides
def get_variable_names(self) -> List[VariableName]:
self._raise_if_callback()
assert self.model is not None
return [v.varName for v in self.model.getVars()]
@overrides
def get_variables(self) -> Dict[str, Variable]:
assert self.model is not None

@@ -124,10 +124,8 @@ class InternalSolver(ABC, EnforceOverrides):
"""
Sets the warm start to be used by the solver.
The solution should be a dictionary following the same format as the
one produced by `get_solution`. Only one warm start is supported.
Calling this function when a warm start already exists will
remove the previous warm start.
Only one warm start is supported. Calling this function when a warm start
already exists will remove the previous warm start.
"""
pass
@@ -154,11 +152,8 @@ class InternalSolver(ABC, EnforceOverrides):
@abstractmethod
def fix(self, solution: Solution) -> None:
"""
Fixes the values of a subset of decision variables.
The values should be provided in the dictionary format generated by
`get_solution`. Missing values in the solution indicate variables
that should be left free.
Fixes the values of a subset of decision variables. Missing values in the
solution indicate variables that should be left free.
"""
pass
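Hedged usage sketches for the two solution-shaped APIs above; the variable names are invented, and the None-means-free convention is read off the fix() docstring:
# Warm start: a hint the solver may use or discard
solver.set_warm_start({"x[0]": 1.0, "x[1]": 0.0})
# Hard-fix x[0] at 1.0; x[1] is left free
solver.fix({"x[0]": 1.0, "x[1]": None})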
@@ -170,9 +165,7 @@ class InternalSolver(ABC, EnforceOverrides):
with higher priority are picked first, given that they are fractional.
Ties are solved arbitrarily. By default, all variables have priority zero.
The priorities should be provided in the dictionary format generated by
`get_solution`. Missing values indicate variables whose priorities
should not be modified.
Missing values indicate variables whose priorities should not be modified.
"""
raise NotImplementedError()
@@ -216,34 +209,6 @@ class InternalSolver(ABC, EnforceOverrides):
"""
pass
@abstractmethod
def get_dual(self, cid: str) -> float:
"""
If the model is feasible and has been solved to optimality, returns the
optimal value of the dual variable associated with this constraint. If the
model is infeasible, returns a portion of the infeasibility certificate
corresponding to the given constraint.
Only available for relaxed problems. Must be called after solve.
"""
pass
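The removed GurobiSolver.get_dual above shows the underlying gurobipy calls; a self-contained sketch of the same logic (assumes InfUnbdInfo was set before solving, otherwise FarkasDual is unavailable):
import gurobipy as gp
def dual_or_farkas(model: gp.Model, cid: str) -> float:
    # Pi: optimal dual of a solved LP; FarkasDual: this constraint's share
    # of the infeasibility certificate (mirrors the removed method).
    c = model.getConstrByName(cid)
    if model.Status == gp.GRB.INFEASIBLE:
        return c.FarkasDual  # requires model.Params.InfUnbdInfo = 1
    return c.Pi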
@abstractmethod
def get_sense(self) -> str:
"""
Returns the sense of the problem (either "min" or "max").
"""
pass
@abstractmethod
def get_variable_names(self) -> List[VariableName]:
"""
Returns a list containing the names of all variables in the model. This
method is used by the ML components to query which variables exist in the
model before a solution is available.
"""
pass
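With this method gone, the surviving get_variables() mapping appears to subsume it; a hedged one-liner, assuming its Dict[str, Variable] keys are the variable names:
names = list(solver.get_variables().keys())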
@abstractmethod
def clone(self) -> "InternalSolver":
"""

@@ -4,7 +4,7 @@
import logging
import traceback
from typing import Optional, List, Any, cast, Callable, Dict, Tuple
from typing import Optional, List, Any, cast, Dict, Tuple
from p_tqdm import p_map
@@ -13,7 +13,7 @@ from miplearn.components.dynamic_lazy import DynamicLazyConstraintsComponent
from miplearn.components.dynamic_user_cuts import UserCutsComponent
from miplearn.components.objective import ObjectiveValueComponent
from miplearn.components.primal import PrimalSolutionComponent
from miplearn.features import FeaturesExtractor, TrainingSample, Sample
from miplearn.features import FeaturesExtractor, Sample
from miplearn.instance.base import Instance
from miplearn.instance.picklegz import PickleGzInstance
from miplearn.solvers import _RedirectOutput
@@ -138,9 +138,7 @@ class LearningSolver:
# Initialize training sample
# -------------------------------------------------------
training_sample = TrainingSample()
sample = Sample()
instance.training_data.append(training_sample)
instance.samples.append(sample)
# Initialize stats
@@ -160,7 +158,6 @@ class LearningSolver:
logger.info("Extracting features (after-load)...")
features = FeaturesExtractor(self.internal_solver).extract(instance)
features.extra = {}
instance.features.__dict__ = features.__dict__
sample.after_load = features
callback_args = (
@@ -171,15 +168,6 @@
sample,
)
callback_args_old = (
self,
instance,
model,
stats,
instance.features,
training_sample,
)
# Solve root LP relaxation
# -------------------------------------------------------
lp_stats = None
@@ -187,19 +175,14 @@
logger.debug("Running before_solve_lp callbacks...")
for component in self.components.values():
component.before_solve_lp(*callback_args)
component.before_solve_lp_old(*callback_args_old)
logger.info("Solving root LP relaxation...")
lp_stats = self.internal_solver.solve_lp(tee=tee)
stats.update(cast(LearningSolveStats, lp_stats.__dict__))
training_sample.lp_solution = self.internal_solver.get_solution()
training_sample.lp_value = lp_stats.lp_value
training_sample.lp_log = lp_stats.lp_log
logger.debug("Running after_solve_lp callbacks...")
for component in self.components.values():
component.after_solve_lp(*callback_args)
component.after_solve_lp_old(*callback_args_old)
# Extract features (after-lp)
# -------------------------------------------------------
@@ -245,7 +228,6 @@
logger.debug("Running before_solve_mip callbacks...")
for component in self.components.values():
component.before_solve_mip(*callback_args)
component.before_solve_mip_old(*callback_args_old)
# Solve MIP
# -------------------------------------------------------
@@ -272,19 +254,11 @@
features.extra = {}
sample.after_mip = features
# Add some information to training_sample
# -------------------------------------------------------
training_sample.lower_bound = mip_stats.mip_lower_bound
training_sample.upper_bound = mip_stats.mip_upper_bound
training_sample.mip_log = mip_stats.mip_log
training_sample.solution = self.internal_solver.get_solution()
# After-solve callbacks
# -------------------------------------------------------
logger.debug("Calling after_solve_mip callbacks...")
for component in self.components.values():
component.after_solve_mip(*callback_args)
component.after_solve_mip_old(*callback_args_old)
# Flush
# -------------------------------------------------------
@@ -414,12 +388,11 @@
def fit(self, training_instances: List[Instance]) -> None:
if len(training_instances) == 0:
logger.warn("Empty list of training instances provided. Skipping.")
logger.warning("Empty list of training instances provided. Skipping.")
return
for component in self.components.values():
logger.info(f"Fitting {component.__class__.__name__}...")
component.fit(training_instances)
component.fit_old(training_instances)
def _add_component(self, component: Component) -> None:
name = component.__class__.__name__
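End to end, the public workflow is unchanged by this commit; a brief usage sketch (instance construction omitted):
solver = LearningSolver()
solver.solve(instance)   # records a new Sample on instance.samples
solver.fit([instance])   # trains each component on the collected samples
solver.solve(instance)   # may now exploit the trained components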

@@ -155,11 +155,6 @@ class BasePyomoSolver(InternalSolver):
"slack",
]
@overrides
def get_dual(self, cid: str) -> float:
constr = self._cname_to_constr[cid]
return self._pyomo_solver.dual[constr]
@overrides
def get_solution(self) -> Optional[Solution]:
assert self.model is not None
@@ -173,21 +168,6 @@ class BasePyomoSolver(InternalSolver):
solution[f"{var}[{index}]"] = var[index].value
return solution
@overrides
def get_variable_names(self) -> List[VariableName]:
assert self.model is not None
variables: List[VariableName] = []
for var in self.model.component_objects(Var):
for index in var:
if var[index].fixed:
continue
variables += [f"{var}[{index}]"]
return variables
@overrides
def get_sense(self) -> str:
return self._obj_sense
@overrides
def get_variables(self) -> Dict[str, Variable]:
assert self.model is not None

@@ -2,7 +2,7 @@
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from typing import Optional, Dict, Callable, Any, Union, Tuple, TYPE_CHECKING, Hashable
from typing import Optional, Dict, Callable, Any, Union, TYPE_CHECKING, Hashable
from mypy_extensions import TypedDict

@@ -9,8 +9,8 @@ from miplearn.features import Features
from miplearn.instance.base import Instance
def test_xy_instance_old() -> None:
def _sample_xy_old(features: Features, sample: str) -> Tuple[Dict, Dict]:
def test_xy_instance() -> None:
def _sample_xy(features: Features, sample: str) -> Tuple[Dict, Dict]:
x = {
"s1": {
"category_a": [
@@ -55,12 +55,10 @@ def test_xy_instance_old() -> None:
comp = Component()
instance_1 = Mock(spec=Instance)
instance_1.training_data = ["s1", "s2"]
instance_1.features = {}
instance_1.samples = ["s1", "s2"]
instance_2 = Mock(spec=Instance)
instance_2.training_data = ["s3"]
instance_2.features = {}
comp.sample_xy_old = _sample_xy_old # type: ignore
instance_2.samples = ["s3"]
comp.sample_xy = _sample_xy # type: ignore
x_expected = {
"category_a": [
[1, 2, 3],
@@ -96,6 +94,6 @@ def test_xy_instance_old() -> None:
[11],
],
}
x_actual, y_actual = comp.xy_instances_old([instance_1, instance_2])
x_actual, y_actual = comp.xy_instances([instance_1, instance_2])
assert x_actual == x_expected
assert y_actual == y_expected

@@ -13,7 +13,6 @@ from miplearn.classifiers.threshold import MinProbabilityThreshold
from miplearn.components import classifier_evaluation_dict
from miplearn.components.dynamic_lazy import DynamicLazyConstraintsComponent
from miplearn.features import (
TrainingSample,
Features,
InstanceFeatures,
Sample,
@@ -24,60 +23,6 @@ from miplearn.solvers.tests import assert_equals
E = 0.1
@pytest.fixture
def training_instances_old() -> List[Instance]:
instances = [cast(Instance, Mock(spec=Instance)) for _ in range(2)]
instances[0].features = Features(
instance=InstanceFeatures(
user_features=[50.0],
),
)
instances[0].training_data = [
TrainingSample(lazy_enforced={"c1", "c2"}),
TrainingSample(lazy_enforced={"c2", "c3"}),
]
instances[0].get_constraint_category = Mock( # type: ignore
side_effect=lambda cid: {
"c1": "type-a",
"c2": "type-a",
"c3": "type-b",
"c4": "type-b",
}[cid]
)
instances[0].get_constraint_features = Mock( # type: ignore
side_effect=lambda cid: {
"c1": [1.0, 2.0, 3.0],
"c2": [4.0, 5.0, 6.0],
"c3": [1.0, 2.0],
"c4": [3.0, 4.0],
}[cid]
)
instances[1].features = Features(
instance=InstanceFeatures(
user_features=[80.0],
),
)
instances[1].training_data = [
TrainingSample(lazy_enforced={"c3", "c4"}),
]
instances[1].get_constraint_category = Mock( # type: ignore
side_effect=lambda cid: {
"c1": None,
"c2": "type-a",
"c3": "type-b",
"c4": "type-b",
}[cid]
)
instances[1].get_constraint_features = Mock( # type: ignore
side_effect=lambda cid: {
"c2": [7.0, 8.0, 9.0],
"c3": [5.0, 6.0],
"c4": [7.0, 8.0],
}[cid]
)
return instances
@pytest.fixture
def training_instances() -> List[Instance]:
instances = [cast(Instance, Mock(spec=Instance)) for _ in range(2)]

@@ -12,7 +12,7 @@ from gurobipy import GRB
from networkx import Graph
from overrides import overrides
from miplearn import InternalSolver
from miplearn.solvers.learning import InternalSolver
from miplearn.components.dynamic_user_cuts import UserCutsComponent
from miplearn.instance.base import Instance
from miplearn.solvers.gurobi import GurobiSolver

@@ -38,17 +38,21 @@ def test_instance() -> None:
)
instance = TravelingSalesmanInstance(n_cities, distances)
solver = LearningSolver()
stats = solver.solve(instance)
solution = instance.training_data[0].solution
assert solution is not None
assert solution["x[(0, 1)]"] == 1.0
assert solution["x[(0, 2)]"] == 0.0
assert solution["x[(0, 3)]"] == 1.0
assert solution["x[(1, 2)]"] == 1.0
assert solution["x[(1, 3)]"] == 0.0
assert solution["x[(2, 3)]"] == 1.0
assert stats["mip_lower_bound"] == 4.0
assert stats["mip_upper_bound"] == 4.0
solver.solve(instance)
assert len(instance.samples) == 1
assert instance.samples[0].after_mip is not None
features = instance.samples[0].after_mip
assert features is not None
assert features.variables is not None
assert features.variables["x[(0, 1)]"].value == 1.0
assert features.variables["x[(0, 2)]"].value == 0.0
assert features.variables["x[(0, 3)]"].value == 1.0
assert features.variables["x[(1, 2)]"].value == 1.0
assert features.variables["x[(1, 3)]"].value == 0.0
assert features.variables["x[(2, 3)]"].value == 1.0
assert features.mip_solve is not None
assert features.mip_solve.mip_lower_bound == 4.0
assert features.mip_solve.mip_upper_bound == 4.0
def test_subtour() -> None:
@@ -67,18 +71,20 @@ def test_subtour() -> None:
instance = TravelingSalesmanInstance(n_cities, distances)
solver = LearningSolver()
solver.solve(instance)
assert len(instance.samples) == 1
assert instance.samples[0].after_mip is not None
assert instance.samples[0].after_mip.extra is not None
lazy_enforced = instance.samples[0].after_mip.extra["lazy_enforced"]
features = instance.samples[0].after_mip
assert features.extra is not None
assert "lazy_enforced" in features.extra
lazy_enforced = features.extra["lazy_enforced"]
assert lazy_enforced is not None
assert len(lazy_enforced) > 0
solution = instance.training_data[0].solution
assert solution is not None
assert solution["x[(0, 1)]"] == 1.0
assert solution["x[(0, 4)]"] == 1.0
assert solution["x[(1, 2)]"] == 1.0
assert solution["x[(2, 3)]"] == 1.0
assert solution["x[(3, 5)]"] == 1.0
assert solution["x[(4, 5)]"] == 1.0
assert features.variables is not None
assert features.variables["x[(0, 1)]"].value == 1.0
assert features.variables["x[(0, 4)]"].value == 1.0
assert features.variables["x[(1, 2)]"].value == 1.0
assert features.variables["x[(2, 3)]"].value == 1.0
assert features.variables["x[(3, 5)]"].value == 1.0
assert features.variables["x[(4, 5)]"].value == 1.0
solver.fit([instance])
solver.solve(instance)

@@ -34,29 +34,38 @@ def test_learning_solver(
)
solver.solve(instance)
assert hasattr(instance, "features")
sample = instance.training_data[0]
assert sample.solution is not None
assert sample.solution["x[0]"] == 1.0
assert sample.solution["x[1]"] == 0.0
assert sample.solution["x[2]"] == 1.0
assert sample.solution["x[3]"] == 1.0
assert sample.lower_bound == 1183.0
assert sample.upper_bound == 1183.0
assert sample.lp_solution is not None
assert sample.lp_solution["x[0]"] is not None
assert sample.lp_solution["x[1]"] is not None
assert sample.lp_solution["x[2]"] is not None
assert sample.lp_solution["x[3]"] is not None
assert round(sample.lp_solution["x[0]"], 3) == 1.000
assert round(sample.lp_solution["x[1]"], 3) == 0.923
assert round(sample.lp_solution["x[2]"], 3) == 1.000
assert round(sample.lp_solution["x[3]"], 3) == 0.000
assert sample.lp_value is not None
assert round(sample.lp_value, 3) == 1287.923
assert sample.mip_log is not None
assert len(sample.mip_log) > 100
assert len(instance.samples) > 0
sample = instance.samples[0]
after_mip = sample.after_mip
assert after_mip is not None
assert after_mip.variables is not None
assert after_mip.mip_solve is not None
assert after_mip.variables["x[0]"].value == 1.0
assert after_mip.variables["x[1]"].value == 0.0
assert after_mip.variables["x[2]"].value == 1.0
assert after_mip.variables["x[3]"].value == 1.0
assert after_mip.mip_solve.mip_lower_bound == 1183.0
assert after_mip.mip_solve.mip_upper_bound == 1183.0
assert after_mip.mip_solve.mip_log is not None
assert len(after_mip.mip_solve.mip_log) > 100
after_lp = sample.after_lp
assert after_lp is not None
assert after_lp.variables is not None
assert after_lp.lp_solve is not None
assert after_lp.variables["x[0]"].value is not None
assert after_lp.variables["x[1]"].value is not None
assert after_lp.variables["x[2]"].value is not None
assert after_lp.variables["x[3]"].value is not None
assert round(after_lp.variables["x[0]"].value, 3) == 1.000
assert round(after_lp.variables["x[1]"].value, 3) == 0.923
assert round(after_lp.variables["x[2]"].value, 3) == 1.000
assert round(after_lp.variables["x[3]"].value, 3) == 0.000
assert after_lp.lp_solve.lp_value is not None
assert round(after_lp.lp_solve.lp_value, 3) == 1287.923
assert after_lp.lp_solve.lp_log is not None
assert len(after_lp.lp_solve.lp_log) > 100
solver.fit([instance])
solver.solve(instance)
@@ -90,9 +99,7 @@ def test_parallel_solve(
results = solver.parallel_solve(instances, n_jobs=3)
assert len(results) == 10
for instance in instances:
data = instance.training_data[0]
assert data.solution is not None
assert len(data.solution.keys()) == 5
assert len(instance.samples) == 1
def test_solve_fit_from_disk(
@ -111,19 +118,13 @@ def test_solve_fit_from_disk(
solver = LearningSolver(solver=internal_solver)
solver.solve(instances[0])
instance_loaded = read_pickle_gz(cast(PickleGzInstance, instances[0]).filename)
assert len(instance_loaded.training_data) > 0
assert instance_loaded.features.instance is not None
assert instance_loaded.features.variables is not None
assert instance_loaded.features.constraints is not None
assert len(instance_loaded.samples) > 0
# Test: parallel_solve
solver.parallel_solve(instances)
for instance in instances:
instance_loaded = read_pickle_gz(cast(PickleGzInstance, instance).filename)
assert len(instance_loaded.training_data) > 0
assert instance_loaded.features.instance is not None
assert instance_loaded.features.variables is not None
assert instance_loaded.features.constraints is not None
assert len(instance_loaded.samples) > 0
# Delete temporary files
for instance in instances:
