Remove experimental LP components

Branch: master
Author: Alinson S. Xavier
Parent: f90f295620
Commit: f495297168

@@ -13,9 +13,6 @@ from .components.dynamic_user_cuts import UserCutsComponent
 from .components.objective import ObjectiveValueComponent
 from .components.primal import PrimalSolutionComponent
 from .components.static_lazy import StaticLazyConstraintsComponent
-from .components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
-from .components.steps.drop_redundant import DropRedundantInequalitiesStep
-from .components.steps.relax_integrality import RelaxIntegralityStep
 from .instance.base import Instance
 from .instance.picklegz import (
     PickleGzInstance,

@@ -38,7 +38,7 @@ class CountingClassifier(Classifier):
         n_samples = x_test.shape[0]
         return np.array([self.mean for _ in range(n_samples)])

-    def __repr__(self):
+    def __repr__(self) -> str:
         return "CountingClassifier(mean=%s)" % self.mean

     def clone(self) -> "CountingClassifier":
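
For context on the lines above: CountingClassifier simply memorizes the empirical frequency of each label column during fit and predicts that same frequency for every test sample. A minimal sketch of the assumed behavior (the fit signature and shapes follow the Classifier interface as I understand it; the numbers are illustrative):

import numpy as np

clf = CountingClassifier()
clf.fit(
    np.zeros((3, 2)),  # features are ignored by this baseline classifier
    np.array([[True, False], [False, True], [False, True]]),
)
# self.mean is the column-wise average of y_train: [0.33, 0.67]
proba = clf.predict_proba(np.zeros((2, 2)))
# -> one identical row per test sample: [[0.33, 0.67], [0.33, 0.67]]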

@@ -85,8 +85,8 @@ class DynamicConstraintsComponent(Component):
         self,
         instance: "Instance",
         sample: TrainingSample,
-    ) -> List[str]:
-        pred: List[str] = []
+    ) -> List[Hashable]:
+        pred: List[Hashable] = []
         x, _, cids = self.sample_xy_with_cids(instance, sample)
         for category in x.keys():
             assert category in self.classifiers

@@ -3,21 +3,23 @@
 # Released under the modified BSD license. See COPYING.md for more details.

 import logging
-from typing import Dict, List, TYPE_CHECKING, Hashable, Tuple
+from typing import Dict, List, TYPE_CHECKING, Hashable, Tuple, Any

 import numpy as np

+from miplearn.instance.base import Instance
 from miplearn.classifiers import Classifier
 from miplearn.classifiers.counting import CountingClassifier
 from miplearn.classifiers.threshold import MinProbabilityThreshold, Threshold
 from miplearn.components.component import Component
 from miplearn.components.dynamic_common import DynamicConstraintsComponent
-from miplearn.features import TrainingSample
+from miplearn.features import TrainingSample, Features
 from miplearn.types import LearningSolveStats

 logger = logging.getLogger(__name__)

 if TYPE_CHECKING:
-    from miplearn.solvers.learning import Instance
+    from miplearn.solvers.learning import LearningSolver

 class DynamicLazyConstraintsComponent(Component):
@@ -40,34 +42,47 @@ class DynamicLazyConstraintsComponent(Component):
         self.known_cids = self.dynamic.known_cids

     @staticmethod
-    def enforce(cids, instance, model, solver):
+    def enforce(
+        cids: List[Hashable],
+        instance: Instance,
+        model: Any,
+        solver: "LearningSolver",
+    ) -> None:
+        assert solver.internal_solver is not None
         for cid in cids:
             cobj = instance.build_lazy_constraint(model, cid)
             solver.internal_solver.add_constraint(cobj)

     def before_solve_mip(
         self,
-        solver,
-        instance,
-        model,
-        stats,
-        features,
-        training_data,
-    ):
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+        stats: LearningSolveStats,
+        features: Features,
+        training_data: TrainingSample,
+    ) -> None:
         training_data.lazy_enforced = set()
         logger.info("Predicting violated lazy constraints...")
         cids = self.dynamic.sample_predict(instance, training_data)
         logger.info("Enforcing %d lazy constraints..." % len(cids))
         self.enforce(cids, instance, model, solver)

-    def iteration_cb(self, solver, instance, model):
+    def iteration_cb(
+        self,
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+    ) -> bool:
         logger.debug("Finding violated lazy constraints...")
         cids = instance.find_violated_lazy_constraints(model)
         if len(cids) == 0:
             logger.debug("No violations found")
             return False
         else:
-            instance.training_data[-1].lazy_enforced |= set(cids)
+            sample = instance.training_data[-1]
+            assert sample.lazy_enforced is not None
+            sample.lazy_enforced |= set(cids)
             logger.debug(" %d violations found" % len(cids))
             self.enforce(cids, instance, model, solver)
             return True
@@ -85,7 +100,7 @@ class DynamicLazyConstraintsComponent(Component):
         self,
         instance: "Instance",
         sample: TrainingSample,
-    ) -> List[str]:
+    ) -> List[Hashable]:
         return self.dynamic.sample_predict(instance, sample)

     def fit(self, training_instances: List["Instance"]) -> None:
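
Taken together, the hooks above implement a predict-then-repair loop: before_solve_mip enforces the lazy constraints the classifiers expect to be needed, and iteration_cb keeps finding and enforcing violated ones until none remain. A rough sketch of how a LearningSolver-like driver is assumed to call them (the re-solve call is hypothetical, not MIPLearn API):

component.before_solve_mip(solver, instance, model, stats, features, sample)
while True:
    solver.internal_solver.solve(model)  # hypothetical re-solve of the MIP
    if not component.iteration_cb(solver, instance, model):
        break  # no violations left; solution satisfies all lazy constraints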

@@ -104,7 +104,7 @@ class UserCutsComponent(Component):
         self,
         instance: "Instance",
         sample: TrainingSample,
-    ) -> List[str]:
+    ) -> List[Hashable]:
         return self.dynamic.sample_predict(instance, sample)

     def fit(self, training_instances: List["Instance"]) -> None:

@@ -45,7 +45,7 @@ class StaticLazyConstraintsComponent(Component):
         self.thresholds: Dict[Hashable, Threshold] = {}
         self.pool: Dict[str, LazyConstraint] = {}
         self.violation_tolerance: float = violation_tolerance
-        self.enforced_cids: Set[str] = set()
+        self.enforced_cids: Set[Hashable] = set()
         self.n_restored: int = 0
         self.n_iterations: int = 0
@@ -145,11 +145,11 @@ class StaticLazyConstraintsComponent(Component):
         self,
         instance: "Instance",
         sample: TrainingSample,
-    ) -> List[str]:
+    ) -> List[Hashable]:
         assert instance.features.constraints is not None
         x, y = self.sample_xy(instance, sample)
-        category_to_cids: Dict[Hashable, List[str]] = {}
+        category_to_cids: Dict[Hashable, List[Hashable]] = {}
         for (cid, cfeatures) in instance.features.constraints.items():
             if cfeatures.category is None:
                 continue
@@ -157,7 +157,7 @@ class StaticLazyConstraintsComponent(Component):
             if category not in category_to_cids:
                 category_to_cids[category] = []
             category_to_cids[category] += [cid]
-        enforced_cids: List[str] = []
+        enforced_cids: List[Hashable] = []
         for category in x.keys():
             if category not in self.classifiers:
                 continue

@@ -1,3 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.

@@ -1,249 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
-
-import logging
-import random
-from copy import deepcopy
-
-import numpy as np
-from tqdm import tqdm
-
-from miplearn.classifiers.counting import CountingClassifier
-from miplearn.components import classifier_evaluation_dict
-from miplearn.components.component import Component
-from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
-
-logger = logging.getLogger(__name__)
-
-
-class ConvertTightIneqsIntoEqsStep(Component):
-    """
-    Component that predicts which inequality constraints are likely to be binding
-    in the LP relaxation of the problem and converts them into equality constraints.
-
-    This component always ensures that the conversion does not affect the
-    feasibility of the problem. Optionally, it can also ensure that optimality is
-    not affected, but that check may be expensive.
-
-    This component does not work on MIPs. All integrality constraints must be
-    relaxed before this component is used.
-    """
-
-    def __init__(
-        self,
-        classifier=CountingClassifier(),
-        threshold=0.95,
-        slack_tolerance=0.0,
-        check_optimality=False,
-    ):
-        self.classifiers = {}
-        self.classifier_prototype = classifier
-        self.threshold = threshold
-        self.slack_tolerance = slack_tolerance
-        self.check_optimality = check_optimality
-        self.converted = []
-        self.original_sense = {}
-        self.n_restored = 0
-        self.n_infeasible_iterations = 0
-        self.n_suboptimal_iterations = 0
-
-    def before_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        features,
-        training_data,
-    ):
-        self.n_restored = 0
-        self.n_infeasible_iterations = 0
-        self.n_suboptimal_iterations = 0
-
-        logger.info("Predicting tight LP constraints...")
-        x, constraints = DropRedundantInequalitiesStep.x(
-            instance,
-            constraint_ids=solver.internal_solver.get_constraint_ids(),
-        )
-        y = self.predict(x)
-
-        n_converted = 0
-        n_kept = 0
-        for category in y.keys():
-            for i in range(len(y[category])):
-                if y[category][i][0] == 1:
-                    cid = constraints[category][i]
-                    s = solver.internal_solver.get_constraint_sense(cid)
-                    self.original_sense[cid] = s
-                    solver.internal_solver.set_constraint_sense(cid, "=")
-                    self.converted += [cid]
-                    n_converted += 1
-                else:
-                    n_kept += 1
-        stats["ConvertTight: Kept"] = n_kept
-        stats["ConvertTight: Converted"] = n_converted
-        logger.info(f"Converted {n_converted} inequalities")
-
-    def after_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        features,
-        training_data,
-    ):
-        if training_data.slacks is None:
-            training_data.slacks = solver.internal_solver.get_inequality_slacks()
-        stats["ConvertTight: Restored"] = self.n_restored
-        stats["ConvertTight: Inf iterations"] = self.n_infeasible_iterations
-        stats["ConvertTight: Subopt iterations"] = self.n_suboptimal_iterations
-
-    def fit(self, training_instances):
-        logger.debug("Extracting x and y...")
-        x = self.x(training_instances)
-        y = self.y(training_instances)
-        logger.debug("Fitting...")
-        for category in tqdm(x.keys(), desc="Fit (rlx:conv_ineqs)"):
-            if category not in self.classifiers:
-                self.classifiers[category] = deepcopy(self.classifier_prototype)
-            self.classifiers[category].fit(x[category], y[category])
-
-    @staticmethod
-    def _x_train(instances):
-        x = {}
-        for instance in tqdm(
-            instances,
-            desc="Extract (drop:x)",
-            disable=len(instances) < 5,
-        ):
-            for training_data in instance.training_data:
-                cids = training_data.slacks.keys()
-                for cid in cids:
-                    category = instance.get_constraint_category(cid)
-                    if category is None:
-                        continue
-                    if category not in x:
-                        x[category] = []
-                    x[category] += [instance.get_constraint_features(cid)]
-        for category in x.keys():
-            x[category] = np.array(x[category])
-        return x
-
-    def x(self, instances):
-        return self._x_train(instances)
-
-    def y(self, instances):
-        y = {}
-        for instance in tqdm(
-            instances,
-            desc="Extract (rlx:conv_ineqs:y)",
-            disable=len(instances) < 5,
-        ):
-            for (cid, slack) in instance.training_data[0].slacks.items():
-                category = instance.get_constraint_category(cid)
-                if category is None:
-                    continue
-                if category not in y:
-                    y[category] = []
-                if 0 <= slack <= self.slack_tolerance:
-                    y[category] += [[False, True]]
-                else:
-                    y[category] += [[True, False]]
-        for category in y.keys():
-            y[category] = np.array(y[category], dtype=np.bool8)
-        return y
-
-    def predict(self, x):
-        y = {}
-        for (category, x_cat) in x.items():
-            if category not in self.classifiers:
-                continue
-            y[category] = []
-            x_cat = np.array(x_cat)
-            proba = self.classifiers[category].predict_proba(x_cat)
-            for i in range(len(proba)):
-                if proba[i][1] >= self.threshold:
-                    y[category] += [[1]]
-                else:
-                    y[category] += [[0]]
-        return y
-
-    def evaluate(self, instance):
-        x = self.x([instance])
-        y_true = self.y([instance])
-        y_pred = self.predict(x)
-        tp, tn, fp, fn = 0, 0, 0, 0
-        for category in y_true.keys():
-            for i in range(len(y_true[category])):
-                if y_pred[category][i][0] == 1:
-                    if y_true[category][i][0] == 1:
-                        tp += 1
-                    else:
-                        fp += 1
-                else:
-                    if y_true[category][i][0] == 1:
-                        fn += 1
-                    else:
-                        tn += 1
-        return classifier_evaluation_dict(tp, tn, fp, fn)
-
-    def iteration_cb(self, solver, instance, model):
-        is_infeasible, is_suboptimal = False, False
-        restored = []
-
-        def check_pi(msense, csense, pi):
-            if csense == "=":
-                return True
-            if msense == "max":
-                if csense == "<":
-                    return pi >= 0
-                else:
-                    return pi <= 0
-            else:
-                if csense == ">":
-                    return pi >= 0
-                else:
-                    return pi <= 0
-
-        def restore(cid):
-            nonlocal restored
-            csense = self.original_sense[cid]
-            solver.internal_solver.set_constraint_sense(cid, csense)
-            restored += [cid]
-
-        if solver.internal_solver.is_infeasible():
-            for cid in self.converted:
-                pi = solver.internal_solver.get_dual(cid)
-                if abs(pi) > 0:
-                    is_infeasible = True
-                    restore(cid)
-        elif self.check_optimality:
-            random.shuffle(self.converted)
-            n_restored = 0
-            for cid in self.converted:
-                if n_restored >= 100:
-                    break
-                pi = solver.internal_solver.get_dual(cid)
-                csense = self.original_sense[cid]
-                msense = solver.internal_solver.get_sense()
-                if not check_pi(msense, csense, pi):
-                    is_suboptimal = True
-                    restore(cid)
-                    n_restored += 1
-        for cid in restored:
-            self.converted.remove(cid)
-        if len(restored) > 0:
-            self.n_restored += len(restored)
-            if is_infeasible:
-                self.n_infeasible_iterations += 1
-            if is_suboptimal:
-                self.n_suboptimal_iterations += 1
-            logger.info(f"Restored {len(restored)} inequalities")
-            return True
-        else:
-            return False
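
The dual-sign test in iteration_cb above follows from LP duality: at an optimum of a maximization problem, a "<=" constraint carries a nonnegative dual and a ">=" constraint a nonpositive one (signs flip for minimization), while an equality's dual is unrestricted. A converted equality whose dual falls outside the sign range of its original sense is therefore evidence that the conversion cut off the optimum, so the constraint is restored. A self-contained restatement of that check, with a few sanity checks:

def check_pi(msense: str, csense: str, pi: float) -> bool:
    """True if dual value pi is consistent with the original constraint sense."""
    if csense == "=":
        return True  # equality duals are unrestricted in sign
    if msense == "max":
        return pi >= 0 if csense == "<" else pi <= 0
    return pi >= 0 if csense == ">" else pi <= 0

assert check_pi("max", "<", 2.5)      # binding <= in a max problem: dual >= 0
assert not check_pi("max", ">", 2.5)  # >= in a max problem should have dual <= 0
assert check_pi("min", ">", 1.0)      # binding >= in a min problem: dual >= 0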

@@ -1,240 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
-
-import logging
-from copy import deepcopy
-
-import numpy as np
-from p_tqdm import p_umap
-from tqdm import tqdm
-
-from miplearn.classifiers.counting import CountingClassifier
-from miplearn.components import classifier_evaluation_dict
-from miplearn.components.component import Component
-from miplearn.components.static_lazy import LazyConstraint
-
-logger = logging.getLogger(__name__)
-
-
-class DropRedundantInequalitiesStep(Component):
-    """
-    Component that predicts which inequalities are likely loose in the LP and
-    removes them. Optionally, double-checks after the problem is solved that all
-    dropped inequalities were in fact redundant and, if not, re-adds them to the
-    problem.
-
-    This component does not work on MIPs. All integrality constraints must be
-    relaxed before this component is used.
-    """
-
-    def __init__(
-        self,
-        classifier=CountingClassifier(),
-        threshold=0.95,
-        slack_tolerance=1e-5,
-        check_feasibility=True,
-        violation_tolerance=1e-5,
-        max_iterations=3,
-    ):
-        self.classifiers = {}
-        self.classifier_prototype = classifier
-        self.threshold = threshold
-        self.slack_tolerance = slack_tolerance
-        self.pool = []
-        self.check_feasibility = check_feasibility
-        self.violation_tolerance = violation_tolerance
-        self.max_iterations = max_iterations
-        self.current_iteration = 0
-        self.n_iterations = 0
-        self.n_restored = 0
-
-    def before_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        features,
-        training_data,
-    ):
-        self.n_iterations = 0
-        self.n_restored = 0
-        self.current_iteration = 0
-
-        logger.info("Predicting redundant LP constraints...")
-        x, constraints = self.x(
-            instance,
-            constraint_ids=solver.internal_solver.get_constraint_ids(),
-        )
-        y = self.predict(x)
-
-        self.pool = []
-        n_dropped = 0
-        n_kept = 0
-        for category in y.keys():
-            for i in range(len(y[category])):
-                if y[category][i][1] == 1:
-                    cid = constraints[category][i]
-                    c = LazyConstraint(
-                        cid=cid,
-                        obj=solver.internal_solver.extract_constraint(cid),
-                    )
-                    self.pool += [c]
-                    n_dropped += 1
-                else:
-                    n_kept += 1
-        stats["DropRedundant: Kept"] = n_kept
-        stats["DropRedundant: Dropped"] = n_dropped
-        logger.info(f"Extracted {n_dropped} predicted redundant constraints")
-
-    def after_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        features,
-        training_data,
-    ):
-        if training_data.slacks is None:
-            training_data.slacks = solver.internal_solver.get_inequality_slacks()
-        stats["DropRedundant: Iterations"] = self.n_iterations
-        stats["DropRedundant: Restored"] = self.n_restored
-
-    def fit(self, training_instances, n_jobs=1):
-        x, y = self.x_y(training_instances, n_jobs=n_jobs)
-        for category in tqdm(x.keys(), desc="Fit (drop)"):
-            if category not in self.classifiers:
-                self.classifiers[category] = deepcopy(self.classifier_prototype)
-            self.classifiers[category].fit(x[category], np.array(y[category]))
-
-    @staticmethod
-    def x(instance, constraint_ids):
-        x = {}
-        constraints = {}
-        cids = constraint_ids
-        for cid in cids:
-            category = instance.get_constraint_category(cid)
-            if category is None:
-                continue
-            if category not in x:
-                x[category] = []
-                constraints[category] = []
-            x[category] += [instance.get_constraint_features(cid)]
-            constraints[category] += [cid]
-        for category in x.keys():
-            x[category] = np.array(x[category])
-        return x, constraints
-
-    def x_y(self, instances, n_jobs=1):
-        def _extract(instance):
-            x = {}
-            y = {}
-            for training_data in instance.training_data:
-                for (cid, slack) in training_data.slacks.items():
-                    category = instance.get_constraint_category(cid)
-                    if category is None:
-                        continue
-                    if category not in x:
-                        x[category] = []
-                    if category not in y:
-                        y[category] = []
-                    if slack > self.slack_tolerance:
-                        y[category] += [[False, True]]
-                    else:
-                        y[category] += [[True, False]]
-                    x[category] += [instance.get_constraint_features(cid)]
-            return x, y
-
-        if n_jobs == 1:
-            results = [_extract(i) for i in tqdm(instances, desc="Extract (drop 1/3)")]
-        else:
-            results = p_umap(
-                _extract,
-                instances,
-                num_cpus=n_jobs,
-                desc="Extract (drop 1/3)",
-            )
-
-        x_combined = {}
-        y_combined = {}
-        for (x, y) in tqdm(results, desc="Extract (drop 2/3)"):
-            for category in x.keys():
-                if category not in x_combined:
-                    x_combined[category] = []
-                    y_combined[category] = []
-                x_combined[category] += x[category]
-                y_combined[category] += y[category]
-        for category in tqdm(x_combined.keys(), desc="Extract (drop 3/3)"):
-            x_combined[category] = np.array(x_combined[category])
-            y_combined[category] = np.array(y_combined[category])
-        return x_combined, y_combined
-
-    def predict(self, x):
-        y = {}
-        for (category, x_cat) in x.items():
-            if category not in self.classifiers:
-                continue
-            y[category] = []
-            x_cat = np.array(x_cat)
-            proba = self.classifiers[category].predict_proba(x_cat)
-            for i in range(len(proba)):
-                if proba[i][1] >= self.threshold:
-                    y[category] += [[False, True]]
-                else:
-                    y[category] += [[True, False]]
-        return y
-
-    def evaluate(self, instance, n_jobs=1):
-        x, y_true = self.x_y([instance], n_jobs=n_jobs)
-        y_pred = self.predict(x)
-        tp, tn, fp, fn = 0, 0, 0, 0
-        for category in tqdm(
-            y_true.keys(),
-            disable=len(y_true) < 100,
-            desc="Eval (drop)",
-        ):
-            for i in range(len(y_true[category])):
-                if (category in y_pred) and (y_pred[category][i][1] == 1):
-                    if y_true[category][i][1] == 1:
-                        tp += 1
-                    else:
-                        fp += 1
-                else:
-                    if y_true[category][i][1] == 1:
-                        fn += 1
-                    else:
-                        tn += 1
-        return classifier_evaluation_dict(tp, tn, fp, fn)
-
-    def iteration_cb(self, solver, instance, model):
-        if not self.check_feasibility:
-            return False
-        if self.current_iteration >= self.max_iterations:
-            return False
-        if solver.internal_solver.is_infeasible():
-            return False
-        self.current_iteration += 1
-        logger.debug("Checking that dropped constraints are satisfied...")
-        constraints_to_add = []
-        for c in self.pool:
-            if not solver.internal_solver.is_constraint_satisfied(
-                c.obj,
-                self.violation_tolerance,
-            ):
-                constraints_to_add.append(c)
-        for c in constraints_to_add:
-            self.pool.remove(c)
-            solver.internal_solver.add_constraint(c.obj)
-        if len(constraints_to_add) > 0:
-            self.n_restored += len(constraints_to_add)
-            logger.info(
-                "Restored %d constraints; %d still in the pool"
-                % (len(constraints_to_add), len(self.pool))
-            )
-            self.n_iterations += 1
-            return True
-        else:
-            return False
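
A note on the label convention used by x_y and predict above: column 0 means "keep" and column 1 means "drop", a constraint is labeled droppable when its observed slack exceeds slack_tolerance, and predict only marks a constraint as droppable when the classifier's probability for column 1 reaches threshold. A minimal restatement of the labeling rule:

def label(slack: float, slack_tolerance: float = 1e-5) -> list:
    # column 0 = keep (tight), column 1 = drop (redundant)
    return [False, True] if slack > slack_tolerance else [True, False]

assert label(0.0) == [True, False]    # tight constraint: keep it
assert label(30.0) == [False, True]   # loose constraint: drop it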

@@ -1,27 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
-
-import logging
-
-from miplearn.components.component import Component
-
-logger = logging.getLogger(__name__)
-
-
-class RelaxIntegralityStep(Component):
-    """
-    Component that relaxes all integrality constraints before the problem is solved.
-    """
-
-    def before_solve_mip(
-        self,
-        solver,
-        instance,
-        model,
-        stats,
-        features,
-        training_data,
-    ):
-        logger.info("Relaxing integrality...")
-        solver.internal_solver.relax()

@@ -19,7 +19,7 @@ class TrainingSample:
     lp_log: Optional[str] = None
     lp_solution: Optional[Solution] = None
     lp_value: Optional[float] = None
-    lazy_enforced: Optional[Set[str]] = None
+    lazy_enforced: Optional[Set[Hashable]] = None
     lower_bound: Optional[float] = None
    mip_log: Optional[str] = None
     solution: Optional[Solution] = None

@@ -104,13 +104,13 @@ class Instance(ABC):
     def has_static_lazy_constraints(self) -> bool:
         return False

-    def has_dynamic_lazy_constraints(self):
+    def has_dynamic_lazy_constraints(self) -> bool:
         return False

     def is_constraint_lazy(self, cid: str) -> bool:
         return False

-    def find_violated_lazy_constraints(self, model):
+    def find_violated_lazy_constraints(self, model: Any) -> List[Hashable]:
         """
         Returns lazy constraint violations found for the current solution.
@@ -129,7 +129,7 @@ class Instance(ABC):
         """
         return []

-    def build_lazy_constraint(self, model, violation):
+    def build_lazy_constraint(self, model: Any, violation: Hashable) -> Any:
         """
         Returns a Pyomo constraint which fixes a given violation.
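
These two hooks are the extension point for dynamic lazy constraints: find_violated_lazy_constraints returns hashable identifiers (the reason for the str-to-Hashable change throughout this commit), and build_lazy_constraint turns one identifier back into a concrete constraint. A hypothetical subclass, sketched under the assumption that _find_subtours and _subtour_cut are user-supplied helpers rather than MIPLearn API (other abstract methods omitted):

from typing import Any, Hashable, List
from miplearn.instance.base import Instance

class SubtourInstance(Instance):
    def has_dynamic_lazy_constraints(self) -> bool:
        return True

    def find_violated_lazy_constraints(self, model: Any) -> List[Hashable]:
        # Identify violations in the current solution and return one
        # hashable id per violation; frozensets work well as ids.
        return [frozenset(tour) for tour in self._find_subtours(model)]

    def build_lazy_constraint(self, model: Any, violation: Hashable) -> Any:
        # Translate an id produced above into an actual constraint object.
        return self._subtour_cut(model, sorted(violation))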

@@ -5,14 +5,14 @@
 import gzip
 import os
 import pickle
-from typing import Optional, Any, List, Hashable, cast, IO
+from typing import Optional, Any, List, Hashable, cast, IO, Callable

 from miplearn.instance.base import logger, Instance
 from miplearn.types import VarIndex


-def lazy_load(func):
-    def inner(self, *args):
+def lazy_load(func: Callable) -> Callable:
+    def inner(self: Any, *args: Any) -> Any:
         if self.instance is None:
             self.instance = self._load()
             self.features = self.instance.features
@@ -81,7 +81,7 @@ class PickleGzInstance(Instance):
         return self.instance.has_static_lazy_constraints()

     @lazy_load
-    def has_dynamic_lazy_constraints(self):
+    def has_dynamic_lazy_constraints(self) -> bool:
         assert self.instance is not None
         return self.instance.has_dynamic_lazy_constraints()
@@ -91,22 +91,22 @@ class PickleGzInstance(Instance):
         return self.instance.is_constraint_lazy(cid)

     @lazy_load
-    def find_violated_lazy_constraints(self, model):
+    def find_violated_lazy_constraints(self, model: Any) -> List[Hashable]:
         assert self.instance is not None
         return self.instance.find_violated_lazy_constraints(model)

     @lazy_load
-    def build_lazy_constraint(self, model, violation):
+    def build_lazy_constraint(self, model: Any, violation: Hashable) -> Any:
         assert self.instance is not None
         return self.instance.build_lazy_constraint(model, violation)

     @lazy_load
-    def find_violated_user_cuts(self, model):
+    def find_violated_user_cuts(self, model: Any) -> List[Hashable]:
         assert self.instance is not None
         return self.instance.find_violated_user_cuts(model)

     @lazy_load
-    def build_user_cut(self, model, violation):
+    def build_user_cut(self, model: Any, violation: Hashable) -> Any:
         assert self.instance is not None
         return self.instance.build_user_cut(model, violation)
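
For reference, the pattern behind the lazy_load decorator above: the wrapped method first materializes the pickled instance via _load(), then delegates, so a PickleGzInstance can be constructed cheaply and deserialized only on first use. A self-contained sketch (the delegation and outer return are assumptions; the hunk cuts off before them):

from typing import Any, Callable

def lazy_load(func: Callable) -> Callable:
    def inner(self: Any, *args: Any) -> Any:
        if self.instance is None:
            self.instance = self._load()             # unpickle on first access
            self.features = self.instance.features
        return func(self, *args)                     # then delegate as usual
    return inner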

@@ -1,3 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.

@@ -1,127 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
-
-from unittest.mock import Mock
-
-from miplearn.classifiers import Classifier
-from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
-from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
-from miplearn.instance.base import Instance
-from miplearn.problems.knapsack import GurobiKnapsackInstance
-from miplearn.solvers.gurobi import GurobiSolver
-from miplearn.solvers.learning import LearningSolver
-
-
-def test_convert_tight_usage():
-    instance = GurobiKnapsackInstance(
-        weights=[3.0, 5.0, 10.0],
-        prices=[1.0, 1.0, 1.0],
-        capacity=16.0,
-    )
-    solver = LearningSolver(
-        solver=GurobiSolver,
-        components=[
-            RelaxIntegralityStep(),
-            ConvertTightIneqsIntoEqsStep(),
-        ],
-    )
-
-    # Solve original problem
-    stats = solver.solve(instance)
-    original_upper_bound = stats["Upper bound"]
-
-    # Should collect training data
-    assert instance.training_data[0].slacks["eq_capacity"] == 0.0
-
-    # Fit and re-solve
-    solver.fit([instance])
-    stats = solver.solve(instance)
-
-    # Objective value should be the same
-    assert stats["Upper bound"] == original_upper_bound
-    assert stats["ConvertTight: Inf iterations"] == 0
-    assert stats["ConvertTight: Subopt iterations"] == 0
-
-
-class SampleInstance(Instance):
-    def to_model(self):
-        import gurobipy as grb
-
-        m = grb.Model("model")
-        x1 = m.addVar(name="x1")
-        x2 = m.addVar(name="x2")
-        m.setObjective(x1 + 2 * x2, grb.GRB.MAXIMIZE)
-        m.addConstr(x1 <= 2, name="c1")
-        m.addConstr(x2 <= 2, name="c2")
-        m.addConstr(x1 + x2 <= 3, name="c3")
-        return m
-
-
-def test_convert_tight_infeasibility():
-    comp = ConvertTightIneqsIntoEqsStep()
-    comp.classifiers = {
-        "c1": Mock(spec=Classifier),
-        "c2": Mock(spec=Classifier),
-        "c3": Mock(spec=Classifier),
-    }
-    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
-    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
-    comp.classifiers["c3"].predict_proba = Mock(return_value=[[1, 0]])
-
-    solver = LearningSolver(
-        solver=GurobiSolver,
-        components=[comp],
-        solve_lp=False,
-    )
-    instance = SampleInstance()
-    stats = solver.solve(instance)
-    assert stats["Upper bound"] == 5.0
-    assert stats["ConvertTight: Inf iterations"] == 1
-    assert stats["ConvertTight: Subopt iterations"] == 0
-
-
-def test_convert_tight_suboptimality():
-    comp = ConvertTightIneqsIntoEqsStep(check_optimality=True)
-    comp.classifiers = {
-        "c1": Mock(spec=Classifier),
-        "c2": Mock(spec=Classifier),
-        "c3": Mock(spec=Classifier),
-    }
-    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
-    comp.classifiers["c2"].predict_proba = Mock(return_value=[[1, 0]])
-    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])
-
-    solver = LearningSolver(
-        solver=GurobiSolver,
-        components=[comp],
-        solve_lp=False,
-    )
-    instance = SampleInstance()
-    stats = solver.solve(instance)
-    assert stats["Upper bound"] == 5.0
-    assert stats["ConvertTight: Inf iterations"] == 0
-    assert stats["ConvertTight: Subopt iterations"] == 1
-
-
-def test_convert_tight_optimal():
-    comp = ConvertTightIneqsIntoEqsStep()
-    comp.classifiers = {
-        "c1": Mock(spec=Classifier),
-        "c2": Mock(spec=Classifier),
-        "c3": Mock(spec=Classifier),
-    }
-    comp.classifiers["c1"].predict_proba = Mock(return_value=[[1, 0]])
-    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
-    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])
-
-    solver = LearningSolver(
-        solver=GurobiSolver,
-        components=[comp],
-        solve_lp=False,
-    )
-    instance = SampleInstance()
-    stats = solver.solve(instance)
-    assert stats["Upper bound"] == 5.0
-    assert stats["ConvertTight: Inf iterations"] == 0
-    assert stats["ConvertTight: Subopt iterations"] == 0

@@ -1,439 +0,0 @@
-# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
-# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
-# Released under the modified BSD license. See COPYING.md for more details.
-
-from unittest.mock import Mock, call
-
-import numpy as np
-
-from miplearn.classifiers import Classifier
-from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
-from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
-from miplearn.features import TrainingSample, Features
-from miplearn.instance.base import Instance
-from miplearn.solvers.gurobi import GurobiSolver
-from miplearn.solvers.internal import InternalSolver
-from miplearn.solvers.learning import LearningSolver
-from tests.fixtures.infeasible import get_infeasible_instance
-from tests.fixtures.redundant import get_instance_with_redundancy
-
-
-def _setup():
-    solver = Mock(spec=LearningSolver)
-
-    internal = solver.internal_solver = Mock(spec=InternalSolver)
-    internal.get_constraint_ids = Mock(return_value=["c1", "c2", "c3", "c4"])
-    internal.get_inequality_slacks = Mock(
-        side_effect=lambda: {
-            "c1": 0.5,
-            "c2": 0.0,
-            "c3": 0.0,
-            "c4": 1.4,
-        }
-    )
-    internal.extract_constraint = Mock(side_effect=lambda cid: "<%s>" % cid)
-    internal.is_constraint_satisfied = Mock(return_value=False)
-    internal.is_infeasible = Mock(return_value=False)
-
-    instance = Mock(spec=Instance)
-    instance.get_constraint_features = Mock(
-        side_effect=lambda cid: {
-            "c2": np.array([1.0, 0.0]),
-            "c3": np.array([0.5, 0.5]),
-            "c4": np.array([1.0]),
-        }[cid]
-    )
-    instance.get_constraint_category = Mock(
-        side_effect=lambda cid: {
-            "c1": None,
-            "c2": "type-a",
-            "c3": "type-a",
-            "c4": "type-b",
-        }[cid]
-    )
-
-    classifiers = {
-        "type-a": Mock(spec=Classifier),
-        "type-b": Mock(spec=Classifier),
-    }
-    classifiers["type-a"].predict_proba = Mock(
-        return_value=np.array(
-            [
-                [0.20, 0.80],
-                [0.05, 0.95],
-            ]
-        )
-    )
-    classifiers["type-b"].predict_proba = Mock(
-        return_value=np.array(
-            [
-                [0.02, 0.98],
-            ]
-        )
-    )
-
-    return solver, internal, instance, classifiers
-
-
-def test_drop_redundant():
-    solver, internal, instance, classifiers = _setup()
-
-    component = DropRedundantInequalitiesStep()
-    component.classifiers = classifiers
-
-    # LearningSolver calls before_solve_mip
-    component.before_solve_mip(
-        solver=solver,
-        instance=instance,
-        model=None,
-        stats={},
-        features=Features(),
-        training_data=TrainingSample(),
-    )
-
-    # Should query list of constraints
-    internal.get_constraint_ids.assert_called_once()
-
-    # Should query category and features for each constraint in the model
-    assert instance.get_constraint_category.call_count == 4
-    instance.get_constraint_category.assert_has_calls(
-        [
-            call("c1"),
-            call("c2"),
-            call("c3"),
-            call("c4"),
-        ]
-    )
-
-    # For constraints with non-null categories, should ask for features
-    assert instance.get_constraint_features.call_count == 3
-    instance.get_constraint_features.assert_has_calls(
-        [
-            call("c2"),
-            call("c3"),
-            call("c4"),
-        ]
-    )
-
-    # Should ask ML to predict whether each constraint should be removed
-    type_a_actual = component.classifiers["type-a"].predict_proba.call_args[0][0]
-    type_b_actual = component.classifiers["type-b"].predict_proba.call_args[0][0]
-    np.testing.assert_array_equal(type_a_actual, np.array([[1.0, 0.0], [0.5, 0.5]]))
-    np.testing.assert_array_equal(type_b_actual, np.array([[1.0]]))
-
-    # Should ask internal solver to remove constraints predicted as redundant
-    assert internal.extract_constraint.call_count == 2
-    internal.extract_constraint.assert_has_calls(
-        [
-            call("c3"),
-            call("c4"),
-        ]
-    )
-
-    # LearningSolver calls after_solve_mip
-    training_data = TrainingSample()
-    component.after_solve_mip(
-        solver=solver,
-        instance=instance,
-        model=None,
-        stats={},
-        features=Features(),
-        training_data=training_data,
-    )
-
-    # Should query slack for all inequalities
-    internal.get_inequality_slacks.assert_called_once()
-
-    # Should store constraint slacks in instance object
-    assert training_data.slacks == {
-        "c1": 0.5,
-        "c2": 0.0,
-        "c3": 0.0,
-        "c4": 1.4,
-    }
-
-
-def test_drop_redundant_with_check_feasibility():
-    solver, internal, instance, classifiers = _setup()
-
-    component = DropRedundantInequalitiesStep(
-        check_feasibility=True,
-        violation_tolerance=1e-3,
-    )
-    component.classifiers = classifiers
-
-    # LearningSolver calls before_solve_mip
-    component.before_solve_mip(
-        solver=solver,
-        instance=instance,
-        model=None,
-        stats={},
-        features=Features(),
-        training_data=TrainingSample(),
-    )
-
-    # Assert constraints are extracted
-    assert internal.extract_constraint.call_count == 2
-    internal.extract_constraint.assert_has_calls(
-        [
-            call("c3"),
-            call("c4"),
-        ]
-    )
-
-    # LearningSolver calls iteration_cb (first time)
-    should_repeat = component.iteration_cb(solver, instance, None)
-
-    # Should ask LearningSolver to repeat
-    assert should_repeat
-
-    # Should ask solver whether the removed constraints are satisfied
-    # (the mock always returns False)
-    internal.is_constraint_satisfied.assert_has_calls(
-        [
-            call("<c3>", 1e-3),
-            call("<c4>", 1e-3),
-        ]
-    )
-
-    # Should add constraints back to the LP relaxation
-    internal.add_constraint.assert_has_calls([call("<c3>"), call("<c4>")])
-
-    # LearningSolver calls iteration_cb (second time)
-    should_repeat = component.iteration_cb(solver, instance, None)
-    assert not should_repeat
-
-
-def test_x_y_fit_predict_evaluate():
-    instances = [Mock(spec=Instance), Mock(spec=Instance)]
-    component = DropRedundantInequalitiesStep(slack_tolerance=0.05, threshold=0.80)
-    component.classifiers = {
-        "type-a": Mock(spec=Classifier),
-        "type-b": Mock(spec=Classifier),
-    }
-    component.classifiers["type-a"].predict_proba = Mock(
-        return_value=[
-            np.array([0.20, 0.80]),
-        ]
-    )
-    component.classifiers["type-b"].predict_proba = Mock(
-        return_value=np.array(
-            [
-                [0.50, 0.50],
-                [0.05, 0.95],
-            ]
-        )
-    )
-
-    # First mock instance
-    instances[0].training_data = [
-        TrainingSample(
-            slacks={
-                "c1": 0.00,
-                "c2": 0.05,
-                "c3": 0.00,
-                "c4": 30.0,
-            }
-        )
-    ]
-    instances[0].get_constraint_category = Mock(
-        side_effect=lambda cid: {
-            "c1": None,
-            "c2": "type-a",
-            "c3": "type-a",
-            "c4": "type-b",
-        }[cid]
-    )
-    instances[0].get_constraint_features = Mock(
-        side_effect=lambda cid: {
-            "c2": np.array([1.0, 0.0]),
-            "c3": np.array([0.5, 0.5]),
-            "c4": np.array([1.0]),
-        }[cid]
-    )
-
-    # Second mock instance
-    instances[1].training_data = [
-        TrainingSample(
-            slacks={
-                "c1": 0.00,
-                "c3": 0.30,
-                "c4": 0.00,
-                "c5": 0.00,
-            }
-        )
-    ]
-    instances[1].get_constraint_category = Mock(
-        side_effect=lambda cid: {
-            "c1": None,
-            "c3": "type-a",
-            "c4": "type-b",
-            "c5": "type-b",
-        }[cid]
-    )
-    instances[1].get_constraint_features = Mock(
-        side_effect=lambda cid: {
-            "c3": np.array([0.3, 0.4]),
-            "c4": np.array([0.7]),
-            "c5": np.array([0.8]),
-        }[cid]
-    )
-
-    expected_x = {
-        "type-a": np.array(
-            [
-                [1.0, 0.0],
-                [0.5, 0.5],
-                [0.3, 0.4],
-            ]
-        ),
-        "type-b": np.array(
-            [
-                [1.0],
-                [0.7],
-                [0.8],
-            ]
-        ),
-    }
-    expected_y = {
-        "type-a": np.array(
-            [
-                [True, False],
-                [True, False],
-                [False, True],
-            ]
-        ),
-        "type-b": np.array(
-            [
-                [False, True],
-                [True, False],
-                [True, False],
-            ]
-        ),
-    }
-
-    # Should build X and Y matrices correctly
-    actual_x, actual_y = component.x_y(instances)
-    for category in ["type-a", "type-b"]:
-        np.testing.assert_array_equal(actual_x[category], expected_x[category])
-        np.testing.assert_array_equal(actual_y[category], expected_y[category])
-
-    # Should pass along X and Y matrices to classifiers
-    component.fit(instances)
-    for category in ["type-a", "type-b"]:
-        actual_x = component.classifiers[category].fit.call_args[0][0]
-        actual_y = component.classifiers[category].fit.call_args[0][1]
-        np.testing.assert_array_equal(actual_x, expected_x[category])
-        np.testing.assert_array_equal(actual_y, expected_y[category])
-
-    assert component.predict(expected_x) == {
-        "type-a": [
-            [False, True],
-        ],
-        "type-b": [
-            [True, False],
-            [False, True],
-        ],
-    }
-
-    ev = component.evaluate(instances[1])
-    assert ev["True positive"] == 1
-    assert ev["True negative"] == 1
-    assert ev["False positive"] == 1
-    assert ev["False negative"] == 0
-
-
-def test_x_multiple_solves():
-    instance = Mock(spec=Instance)
-    instance.training_data = [
-        TrainingSample(
-            slacks={
-                "c1": 0.00,
-                "c2": 0.05,
-                "c3": 0.00,
-                "c4": 30.0,
-            }
-        ),
-        TrainingSample(
-            slacks={
-                "c1": 0.00,
-                "c2": 0.00,
-                "c3": 1.00,
-                "c4": 0.0,
-            }
-        ),
-    ]
-    instance.get_constraint_category = Mock(
-        side_effect=lambda cid: {
-            "c1": None,
-            "c2": "type-a",
-            "c3": "type-a",
-            "c4": "type-b",
-        }[cid]
-    )
-    instance.get_constraint_features = Mock(
-        side_effect=lambda cid: {
-            "c2": np.array([1.0, 0.0]),
-            "c3": np.array([0.5, 0.5]),
-            "c4": np.array([1.0]),
-        }[cid]
-    )
-
-    expected_x = {
-        "type-a": np.array(
-            [
-                [1.0, 0.0],
-                [0.5, 0.5],
-                [1.0, 0.0],
-                [0.5, 0.5],
-            ]
-        ),
-        "type-b": np.array(
-            [
-                [1.0],
-                [1.0],
-            ]
-        ),
-    }
-    expected_y = {
-        "type-a": np.array(
-            [
-                [False, True],
-                [True, False],
-                [True, False],
-                [False, True],
-            ]
-        ),
-        "type-b": np.array(
-            [
-                [False, True],
-                [True, False],
-            ]
-        ),
-    }
-
-    # Should build X and Y matrices correctly
-    component = DropRedundantInequalitiesStep()
-    actual_x, actual_y = component.x_y([instance])
-    for category in ["type-a", "type-b"]:
-        np.testing.assert_array_equal(actual_x[category], expected_x[category])
-        np.testing.assert_array_equal(actual_y[category], expected_y[category])
-
-
-def test_usage():
-    for internal_solver in [GurobiSolver]:
-        for instance in [
-            get_instance_with_redundancy(internal_solver),
-            get_infeasible_instance(internal_solver),
-        ]:
-            solver = LearningSolver(
-                solver=internal_solver,
-                components=[
-                    RelaxIntegralityStep(),
-                    DropRedundantInequalitiesStep(),
-                ],
-            )
-            # The following should not crash
-            solver.solve(instance)
-            solver.fit([instance])
-            solver.solve(instance)