commit aecc3a311f
@ -0,0 +1,214 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
import random
from copy import deepcopy

import numpy as np
from tqdm import tqdm

from ... import Component
from ...classifiers.counting import CountingClassifier
from ...components import classifier_evaluation_dict
from ...extractors import InstanceIterator
from .drop_redundant import DropRedundantInequalitiesStep

logger = logging.getLogger(__name__)


class ConvertTightIneqsIntoEqsStep(Component):
    """
    Component that predicts which inequality constraints are likely to be binding
    in the LP relaxation of the problem and converts them into equality
    constraints.

    The component always ensures that the conversion does not affect the
    feasibility of the problem. Optionally, it can also ensure that the
    conversion does not affect optimality, but this check may be expensive.

    This component does not work on MIPs. All integrality constraints must be
    relaxed before this component is used.
    """

    def __init__(
        self,
        classifier=CountingClassifier(),
        threshold=0.95,
        slack_tolerance=0.0,
        check_optimality=False,
    ):
        self.classifiers = {}
        self.classifier_prototype = classifier
        self.threshold = threshold
        self.slack_tolerance = slack_tolerance
        self.check_optimality = check_optimality
        self.converted = []
        self.original_sense = {}

    def before_solve(self, solver, instance, _):
        logger.info("Predicting tight LP constraints...")
        x, constraints = DropRedundantInequalitiesStep._x_test(
            instance,
            constraint_ids=solver.internal_solver.get_constraint_ids(),
        )
        y = self.predict(x)

        self.n_converted = 0
        self.n_restored = 0
        self.n_kept = 0
        self.n_infeasible_iterations = 0
        self.n_suboptimal_iterations = 0
        for category in y.keys():
            for i in range(len(y[category])):
                if y[category][i][0] == 1:
                    cid = constraints[category][i]
                    s = solver.internal_solver.get_constraint_sense(cid)
                    self.original_sense[cid] = s
                    solver.internal_solver.set_constraint_sense(cid, "=")
                    self.converted += [cid]
                    self.n_converted += 1
                else:
                    self.n_kept += 1

        logger.info(f"Converted {self.n_converted} inequalities")

    def after_solve(
        self,
        solver,
        instance,
        model,
        stats,
        training_data,
    ):
        if "slacks" not in training_data.keys():
            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
        stats["ConvertTight: Kept"] = self.n_kept
        stats["ConvertTight: Converted"] = self.n_converted
        stats["ConvertTight: Restored"] = self.n_restored
        stats["ConvertTight: Inf iterations"] = self.n_infeasible_iterations
        stats["ConvertTight: Subopt iterations"] = self.n_suboptimal_iterations

    def fit(self, training_instances):
        logger.debug("Extracting x and y...")
        x = self.x(training_instances)
        y = self.y(training_instances)
        logger.debug("Fitting...")
        for category in tqdm(x.keys(), desc="Fit (rlx:conv_ineqs)"):
            if category not in self.classifiers:
                self.classifiers[category] = deepcopy(self.classifier_prototype)
            self.classifiers[category].fit(x[category], y[category])

    def x(self, instances):
        return DropRedundantInequalitiesStep._x_train(instances)

    def y(self, instances):
        y = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (rlx:conv_ineqs:y)",
            disable=len(instances) < 5,
        ):
            for (cid, slack) in instance.training_data[0]["slacks"].items():
                category = instance.get_constraint_category(cid)
                if category is None:
                    continue
                if category not in y:
                    y[category] = []
                # A constraint is labeled tight (1) if its slack is within
                # tolerance of zero.
                if 0 <= slack <= self.slack_tolerance:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def predict(self, x):
        y = {}
        for (category, x_cat) in x.items():
            if category not in self.classifiers:
                continue
            y[category] = []
            x_cat = np.array(x_cat)
            proba = self.classifiers[category].predict_proba(x_cat)
            for i in range(len(proba)):
                if proba[i][1] >= self.threshold:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def evaluate(self, instance):
        x = self.x([instance])
        y_true = self.y([instance])
        y_pred = self.predict(x)
        tp, tn, fp, fn = 0, 0, 0, 0
        for category in y_true.keys():
            for i in range(len(y_true[category])):
                if y_pred[category][i][0] == 1:
                    if y_true[category][i][0] == 1:
                        tp += 1
                    else:
                        fp += 1
                else:
                    if y_true[category][i][0] == 1:
                        fn += 1
                    else:
                        tn += 1
        return classifier_evaluation_dict(tp, tn, fp, fn)

    def iteration_cb(self, solver, instance, model):
        is_infeasible, is_suboptimal = False, False
        restored = []

        def check_pi(msense, csense, pi):
            # An equality constraint may carry a dual value of either sign. For
            # an inequality, the dual must have the sign consistent with the
            # constraint sense and the objective sense; a wrong-signed dual on a
            # converted equality means the equality is cutting off better
            # solutions.
            if csense == "=":
                return True
            if msense == "max":
                if csense == "<":
                    return pi >= 0
                else:
                    return pi <= 0
            else:
                if csense == ">":
                    return pi >= 0
                else:
                    return pi <= 0

        def restore(cid):
            nonlocal restored
            csense = self.original_sense[cid]
            solver.internal_solver.set_constraint_sense(cid, csense)
            restored += [cid]

        if solver.internal_solver.is_infeasible():
            # Restore any converted constraint with a nonzero dual value, since
            # it may be responsible for the infeasibility.
            for cid in self.converted:
                pi = solver.internal_solver.get_dual(cid)
                if abs(pi) > 0:
                    is_infeasible = True
                    restore(cid)
        elif self.check_optimality:
            # Restore (at most 100 per iteration) converted constraints whose
            # dual values have the wrong sign.
            random.shuffle(self.converted)
            n_restored = 0
            for cid in self.converted:
                if n_restored >= 100:
                    break
                pi = solver.internal_solver.get_dual(cid)
                csense = self.original_sense[cid]
                msense = solver.internal_solver.get_sense()
                if not check_pi(msense, csense, pi):
                    is_suboptimal = True
                    restore(cid)
                    n_restored += 1

        for cid in restored:
            self.converted.remove(cid)

        if len(restored) > 0:
            self.n_restored += len(restored)
            if is_infeasible:
                self.n_infeasible_iterations += 1
            if is_suboptimal:
                self.n_suboptimal_iterations += 1
            logger.info(f"Restored {len(restored)} inequalities")
            return True
        else:
            return False
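
The dual-sign test in iteration_cb deserves a small worked example. The sketch below is illustrative only (it is not part of this commit and uses plain gurobipy rather than MIPLearn's internal-solver interface): a "<=" constraint that is loose at the true optimum gets converted into an equality, and the resulting negative dual is exactly the condition check_pi rejects.

# Hypothetical sketch, not part of the commit: why a wrong-signed dual on a
# converted equality signals a suboptimal conversion.
import gurobipy as grb
from gurobipy import GRB

m = grb.Model()
x = m.addVar(lb=-GRB.INFINITY, name="x")
m.setObjective(-x, GRB.MAXIMIZE)
c = m.addConstr(x == 2.0, name="c")  # "x <= 2" converted into an equality
m.optimize()
# The inequality is loose at the true optimum (x = 0), so forcing x == 2
# yields objective -2 and a negative dual value:
print(c.Pi)  # -1.0; check_pi("max", "<", -1.0) is False, so "c" is restored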
@ -0,0 +1,228 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging
from copy import deepcopy

import numpy as np
from tqdm import tqdm

from miplearn import Component
from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict
from miplearn.components.lazy_static import LazyConstraint
from miplearn.extractors import InstanceIterator

logger = logging.getLogger(__name__)


class DropRedundantInequalitiesStep(Component):
    """
    Component that predicts which inequalities are likely to be loose in the LP
    and drops them. Optionally, after the problem is solved, it double-checks
    that every dropped inequality was in fact redundant and re-adds any that
    were not.

    This component does not work on MIPs. All integrality constraints must be
    relaxed before this component is used.
    """

    def __init__(
        self,
        classifier=CountingClassifier(),
        threshold=0.95,
        slack_tolerance=1e-5,
        check_feasibility=False,
        violation_tolerance=1e-5,
        max_iterations=3,
    ):
        self.classifiers = {}
        self.classifier_prototype = classifier
        self.threshold = threshold
        self.slack_tolerance = slack_tolerance
        self.pool = []
        self.check_feasibility = check_feasibility
        self.violation_tolerance = violation_tolerance
        self.max_iterations = max_iterations
        self.current_iteration = 0

    def before_solve(self, solver, instance, _):
        self.current_iteration = 0

        logger.info("Predicting redundant LP constraints...")
        x, constraints = self._x_test(
            instance,
            constraint_ids=solver.internal_solver.get_constraint_ids(),
        )
        y = self.predict(x)

        self.total_dropped = 0
        self.total_restored = 0
        self.total_kept = 0
        self.total_iterations = 0
        for category in y.keys():
            for i in range(len(y[category])):
                if y[category][i][0] == 1:
                    cid = constraints[category][i]
                    c = LazyConstraint(
                        cid=cid,
                        obj=solver.internal_solver.extract_constraint(cid),
                    )
                    self.pool += [c]
                    self.total_dropped += 1
                else:
                    self.total_kept += 1
        logger.info(f"Extracted {self.total_dropped} constraints predicted to be redundant")

    def after_solve(
        self,
        solver,
        instance,
        model,
        stats,
        training_data,
    ):
        if "slacks" not in training_data.keys():
            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
        stats.update(
            {
                "DropRedundant: Kept": self.total_kept,
                "DropRedundant: Dropped": self.total_dropped,
                "DropRedundant: Restored": self.total_restored,
                "DropRedundant: Iterations": self.total_iterations,
            }
        )

    def fit(self, training_instances):
        logger.debug("Extracting x and y...")
        x = self.x(training_instances)
        y = self.y(training_instances)
        logger.debug("Fitting...")
        for category in tqdm(x.keys(), desc="Fit (rlx:drop_ineq)"):
            if category not in self.classifiers:
                self.classifiers[category] = deepcopy(self.classifier_prototype)
            self.classifiers[category].fit(x[category], y[category])

    @staticmethod
    def _x_test(instance, constraint_ids):
        x = {}
        constraints = {}
        for cid in constraint_ids:
            category = instance.get_constraint_category(cid)
            if category is None:
                continue
            if category not in x:
                x[category] = []
                constraints[category] = []
            x[category] += [instance.get_constraint_features(cid)]
            constraints[category] += [cid]
        for category in x.keys():
            x[category] = np.array(x[category])
        return x, constraints

    @staticmethod
    def _x_train(instances):
        x = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (rlx:drop_ineq:x)",
            disable=len(instances) < 5,
        ):
            for training_data in instance.training_data:
                cids = training_data["slacks"].keys()
                for cid in cids:
                    category = instance.get_constraint_category(cid)
                    if category is None:
                        continue
                    if category not in x:
                        x[category] = []
                    x[category] += [instance.get_constraint_features(cid)]
        for category in x.keys():
            x[category] = np.array(x[category])
        return x

    def x(self, instances):
        return self._x_train(instances)

    def y(self, instances):
        y = {}
        for instance in tqdm(
            InstanceIterator(instances),
            desc="Extract (rlx:drop_ineq:y)",
            disable=len(instances) < 5,
        ):
            for training_data in instance.training_data:
                for (cid, slack) in training_data["slacks"].items():
                    category = instance.get_constraint_category(cid)
                    if category is None:
                        continue
                    if category not in y:
                        y[category] = []
                    # A constraint is labeled redundant (1) if its slack
                    # exceeds the tolerance.
                    if slack > self.slack_tolerance:
                        y[category] += [[1]]
                    else:
                        y[category] += [[0]]
        return y

    def predict(self, x):
        y = {}
        for (category, x_cat) in x.items():
            if category not in self.classifiers:
                continue
            y[category] = []
            x_cat = np.array(x_cat)
            proba = self.classifiers[category].predict_proba(x_cat)
            for i in range(len(proba)):
                if proba[i][1] >= self.threshold:
                    y[category] += [[1]]
                else:
                    y[category] += [[0]]
        return y

    def evaluate(self, instance):
        x = self.x([instance])
        y_true = self.y([instance])
        y_pred = self.predict(x)
        tp, tn, fp, fn = 0, 0, 0, 0
        for category in y_true.keys():
            for i in range(len(y_true[category])):
                if y_pred[category][i][0] == 1:
                    if y_true[category][i][0] == 1:
                        tp += 1
                    else:
                        fp += 1
                else:
                    if y_true[category][i][0] == 1:
                        fn += 1
                    else:
                        tn += 1
        return classifier_evaluation_dict(tp, tn, fp, fn)

    def iteration_cb(self, solver, instance, model):
        if not self.check_feasibility:
            return False
        if self.current_iteration >= self.max_iterations:
            return False
        self.current_iteration += 1
        logger.debug("Checking that dropped constraints are satisfied...")
        constraints_to_add = []
        for c in self.pool:
            if not solver.internal_solver.is_constraint_satisfied(
                c.obj,
                self.violation_tolerance,
            ):
                constraints_to_add.append(c)
        for c in constraints_to_add:
            self.pool.remove(c)
            solver.internal_solver.add_constraint(c.obj)
        if len(constraints_to_add) > 0:
            self.total_restored += len(constraints_to_add)
            logger.info(
                "Restored %d constraints (%d still in the pool)"
                % (len(constraints_to_add), len(self.pool))
            )
            self.total_iterations += 1
            return True
        else:
            return False
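
To connect the pieces, here is a hedged usage sketch of the train/drop/check/re-add workflow that the docstring above describes. `train_instance` and `test_instance` are hypothetical placeholders; the module path is inferred from the imports elsewhere in this commit, and the parameter values are illustrative, not recommendations.

# Hypothetical usage sketch, not part of the commit.
from miplearn import LearningSolver, GurobiSolver
from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep

step = DropRedundantInequalitiesStep(
    threshold=0.95,          # drop only when P(redundant) >= 0.95
    check_feasibility=True,  # re-add dropped constraints found violated
    max_iterations=3,        # at most 3 check/re-add rounds per solve
)
solver = LearningSolver(
    solver=GurobiSolver(),
    components=[RelaxIntegralityStep(), step],
)
solver.solve(train_instance)   # after_solve() records constraint slacks
solver.fit([train_instance])   # fit() trains one classifier per category
stats = solver.solve(test_instance)
print(stats["DropRedundant: Dropped"], stats["DropRedundant: Restored"])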
@ -0,0 +1,29 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import logging

from miplearn import Component

logger = logging.getLogger(__name__)


class RelaxIntegralityStep(Component):
    """
    Component that relaxes all integrality constraints before the problem is solved.
    """

    def before_solve(self, solver, instance, _):
        logger.info("Relaxing integrality...")
        solver.internal_solver.relax()

    def after_solve(
        self,
        solver,
        instance,
        model,
        stats,
        training_data,
    ):
        return
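
For readers unfamiliar with what relaxing integrality means in practice, the following standalone gurobipy sketch (hypothetical, independent of MIPLearn's internal-solver wrapper) shows a binary variable becoming continuous and the relaxation bound this produces.

# Hypothetical sketch, not part of the commit.
import gurobipy as grb
from gurobipy import GRB

m = grb.Model()
x = m.addVar(vtype=GRB.BINARY, name="x")
m.setObjective(x, GRB.MAXIMIZE)
m.addConstr(2 * x <= 1, name="c")
r = m.relax()     # continuous copy: 0 <= x <= 1, no integrality
r.optimize()
print(r.ObjVal)   # 0.5, whereas the integral optimum is 0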
@ -0,0 +1,121 @@
from unittest.mock import Mock

from miplearn import LearningSolver, GurobiSolver, Instance, Classifier
from miplearn.components.steps.convert_tight import ConvertTightIneqsIntoEqsStep
from miplearn.components.steps.relax_integrality import RelaxIntegralityStep
from miplearn.problems.knapsack import GurobiKnapsackInstance


def test_convert_tight_usage():
    instance = GurobiKnapsackInstance(
        weights=[3.0, 5.0, 10.0],
        prices=[1.0, 1.0, 1.0],
        capacity=16.0,
    )
    solver = LearningSolver(
        solver=GurobiSolver(),
        components=[
            RelaxIntegralityStep(),
            ConvertTightIneqsIntoEqsStep(),
        ],
    )

    # Solve original problem
    solver.solve(instance)
    original_upper_bound = instance.upper_bound

    # Should collect training data
    assert instance.training_data[0]["slacks"]["eq_capacity"] == 0.0

    # Fit and resolve
    solver.fit([instance])
    stats = solver.solve(instance)

    # Objective value should be the same
    assert instance.upper_bound == original_upper_bound
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 0


class TestInstance(Instance):
    def to_model(self):
        import gurobipy as grb
        from gurobipy import GRB

        m = grb.Model("model")
        x1 = m.addVar(name="x1")
        x2 = m.addVar(name="x2")
        m.setObjective(x1 + 2 * x2, GRB.MAXIMIZE)
        m.addConstr(x1 <= 2, name="c1")
        m.addConstr(x2 <= 2, name="c2")
        m.addConstr(x1 + x2 <= 3, name="c3")
        return m


def test_convert_tight_infeasibility():
    comp = ConvertTightIneqsIntoEqsStep()
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[1, 0]])

    solver = LearningSolver(
        solver=GurobiSolver(params={}),
        components=[comp],
        solve_lp_first=False,
    )
    instance = TestInstance()
    stats = solver.solve(instance)
    assert instance.lower_bound == 5.0
    assert stats["ConvertTight: Inf iterations"] == 1
    assert stats["ConvertTight: Subopt iterations"] == 0


def test_convert_tight_suboptimality():
    comp = ConvertTightIneqsIntoEqsStep(check_optimality=True)
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[1, 0]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])

    solver = LearningSolver(
        solver=GurobiSolver(params={}),
        components=[comp],
        solve_lp_first=False,
    )
    instance = TestInstance()
    stats = solver.solve(instance)
    assert instance.lower_bound == 5.0
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 1


def test_convert_tight_optimal():
    comp = ConvertTightIneqsIntoEqsStep()
    comp.classifiers = {
        "c1": Mock(spec=Classifier),
        "c2": Mock(spec=Classifier),
        "c3": Mock(spec=Classifier),
    }
    comp.classifiers["c1"].predict_proba = Mock(return_value=[[1, 0]])
    comp.classifiers["c2"].predict_proba = Mock(return_value=[[0, 1]])
    comp.classifiers["c3"].predict_proba = Mock(return_value=[[0, 1]])

    solver = LearningSolver(
        solver=GurobiSolver(params={}),
        components=[comp],
        solve_lp_first=False,
    )
    instance = TestInstance()
    stats = solver.solve(instance)
    assert instance.lower_bound == 5.0
    assert stats["ConvertTight: Inf iterations"] == 0
    assert stats["ConvertTight: Subopt iterations"] == 0