Mirror of https://github.com/ANL-CEEESA/MIPLearn.git
Move python files to root folder; remove built docs
42
miplearn/components/__init__.py
Normal file
@@ -0,0 +1,42 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.


def classifier_evaluation_dict(tp, tn, fp, fn):
    """Return a dictionary of binary-classification metrics computed from confusion-matrix counts."""
    p = tp + fn
    n = fp + tn
    d = {
        "Predicted positive": fp + tp,
        "Predicted negative": fn + tn,
        "Condition positive": p,
        "Condition negative": n,
        "True positive": tp,
        "True negative": tn,
        "False positive": fp,
        "False negative": fn,
        "Accuracy": (tp + tn) / (p + n),
        "F1 score": (2 * tp) / (2 * tp + fp + fn),
    }

    if p > 0:
        d["Recall"] = tp / p
    else:
        d["Recall"] = 1.0

    if tp + fp > 0:
        d["Precision"] = tp / (tp + fp)
    else:
        d["Precision"] = 1.0

    t = (p + n) / 100.0
    d["Predicted positive (%)"] = d["Predicted positive"] / t
    d["Predicted negative (%)"] = d["Predicted negative"] / t
    d["Condition positive (%)"] = d["Condition positive"] / t
    d["Condition negative (%)"] = d["Condition negative"] / t
    d["True positive (%)"] = d["True positive"] / t
    d["True negative (%)"] = d["True negative"] / t
    d["False positive (%)"] = d["False positive"] / t
    d["False negative (%)"] = d["False negative"] / t
    return d
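For context, a minimal sketch of how the helper above can be used on its own; the confusion-matrix counts below are made up for illustration.

from miplearn.components import classifier_evaluation_dict

# Hypothetical counts: 8 true positives, 5 true negatives,
# 2 false positives, 1 false negative (16 samples in total).
metrics = classifier_evaluation_dict(tp=8, tn=5, fp=2, fn=1)
print(metrics["Accuracy"])   # (8 + 5) / 16 = 0.8125
print(metrics["Precision"])  # 8 / (8 + 2) = 0.8
print(metrics["Recall"])     # 8 / (8 + 1) = 0.888...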
23
miplearn/components/component.py
Normal file
@@ -0,0 +1,23 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import ABC, abstractmethod


class Component(ABC):
    """
    A Component is an object which adds functionality to a LearningSolver.
    """

    @abstractmethod
    def before_solve(self, solver, instance, model):
        pass

    @abstractmethod
    def after_solve(self, solver, instance, model, results):
        pass

    @abstractmethod
    def fit(self, training_instances):
        pass
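To illustrate the interface above, a minimal sketch of a custom component; the LoggingComponent class is hypothetical and not part of this commit.

from miplearn import Component


class LoggingComponent(Component):
    """Hypothetical component that only reports the solver life cycle."""

    def before_solve(self, solver, instance, model):
        print("About to solve:", instance)

    def after_solve(self, solver, instance, model, results):
        print("Finished solving; result keys:", sorted(results.keys()))

    def fit(self, training_instances):
        pass  # nothing to learn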
93
miplearn/components/cuts.py
Normal file
@@ -0,0 +1,93 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import sys
from copy import deepcopy

from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict

from .component import Component
from ..extractors import *

logger = logging.getLogger(__name__)


class UserCutsComponent(Component):
    """
    A component that predicts which user cuts to enforce.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.05):
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold
        self.classifier_prototype = classifier
        self.classifiers = {}

    def before_solve(self, solver, instance, model):
        logger.info("Predicting violated user cuts...")
        violations = self.predict(instance)
        logger.info("Enforcing %d cuts..." % len(violations))
        for v in violations:
            cut = instance.build_user_cut(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_solve(self, solver, instance, model, results):
        pass

    def fit(self, training_instances):
        logger.debug("Fitting...")
        features = InstanceFeaturesExtractor().extract(training_instances)

        self.classifiers = {}
        violation_to_instance_idx = {}
        for (idx, instance) in enumerate(training_instances):
            for v in instance.found_violated_user_cuts:
                if v not in self.classifiers:
                    self.classifiers[v] = deepcopy(self.classifier_prototype)
                    violation_to_instance_idx[v] = []
                violation_to_instance_idx[v] += [idx]

        for (v, classifier) in tqdm(self.classifiers.items(),
                                    desc="Fit (user cuts)",
                                    disable=not sys.stdout.isatty(),
                                    ):
            logger.debug("Training: %s" % (str(v)))
            label = np.zeros(len(training_instances))
            label[violation_to_instance_idx[v]] = 1.0
            classifier.fit(features, label)

    def predict(self, instance):
        violations = []
        features = InstanceFeaturesExtractor().extract([instance])
        for (v, classifier) in self.classifiers.items():
            proba = classifier.predict_proba(features)
            if proba[0][1] > self.threshold:
                violations += [v]
        return violations

    def evaluate(self, instances):
        results = {}
        all_violations = set()
        for instance in instances:
            all_violations |= set(instance.found_violated_user_cuts)
        for idx in tqdm(range(len(instances)),
                        desc="Evaluate (user cuts)",
                        disable=not sys.stdout.isatty(),
                        ):
            instance = instances[idx]
            condition_positive = set(instance.found_violated_user_cuts)
            condition_negative = all_violations - condition_positive
            pred_positive = set(self.predict(instance)) & all_violations
            pred_negative = all_violations - pred_positive
            tp = len(pred_positive & condition_positive)
            tn = len(pred_negative & condition_negative)
            fp = len(pred_positive & condition_negative)
            fn = len(pred_negative & condition_positive)
            results[idx] = classifier_evaluation_dict(tp, tn, fp, fn)
        return results
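A rough sketch of the fit/predict/evaluate workflow of the component above, reusing the get_test_pyomo_instances helper that the test files later in this commit rely on; the found_violated_user_cuts values and the printed output are illustrative assumptions.

from miplearn.components.cuts import UserCutsComponent
from miplearn.tests import get_test_pyomo_instances  # helper used by the test suite

instances, models = get_test_pyomo_instances()
# Pretend that earlier solves recorded these violated user cuts.
instances[0].found_violated_user_cuts = ["cut_a", "cut_b"]
instances[1].found_violated_user_cuts = ["cut_b"]

component = UserCutsComponent(threshold=0.10)
component.fit(instances)

# Cuts whose predicted probability of being violated exceeds the threshold.
print(component.predict(instances[0]))

# Confusion-matrix statistics per instance, via classifier_evaluation_dict.
print(component.evaluate(instances))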
95
miplearn/components/lazy.py
Normal file
@@ -0,0 +1,95 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import sys
from copy import deepcopy

from miplearn.classifiers.counting import CountingClassifier
from miplearn.components import classifier_evaluation_dict

from .component import Component
from ..extractors import *

logger = logging.getLogger(__name__)


class LazyConstraintsComponent(Component):
    """
    A component that predicts which lazy constraints to enforce.
    """

    def __init__(self,
                 classifier=CountingClassifier(),
                 threshold=0.05):
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold
        self.classifier_prototype = classifier
        self.classifiers = {}

    def before_solve(self, solver, instance, model):
        logger.info("Predicting violated lazy constraints...")
        violations = self.predict(instance)
        logger.info("Enforcing %d constraints..." % len(violations))
        for v in violations:
            cut = instance.build_lazy_constraint(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_solve(self, solver, instance, model, results):
        pass

    def fit(self, training_instances):
        logger.debug("Fitting...")
        features = InstanceFeaturesExtractor().extract(training_instances)

        self.classifiers = {}
        violation_to_instance_idx = {}
        for (idx, instance) in enumerate(training_instances):
            for v in instance.found_violated_lazy_constraints:
                if isinstance(v, list):
                    v = tuple(v)
                if v not in self.classifiers:
                    self.classifiers[v] = deepcopy(self.classifier_prototype)
                    violation_to_instance_idx[v] = []
                violation_to_instance_idx[v] += [idx]

        for (v, classifier) in tqdm(self.classifiers.items(),
                                    desc="Fit (lazy)",
                                    disable=not sys.stdout.isatty(),
                                    ):
            logger.debug("Training: %s" % (str(v)))
            label = np.zeros(len(training_instances))
            label[violation_to_instance_idx[v]] = 1.0
            classifier.fit(features, label)

    def predict(self, instance):
        violations = []
        features = InstanceFeaturesExtractor().extract([instance])
        for (v, classifier) in self.classifiers.items():
            proba = classifier.predict_proba(features)
            if proba[0][1] > self.threshold:
                violations += [v]
        return violations

    def evaluate(self, instances):
        results = {}
        all_violations = set()
        for instance in instances:
            all_violations |= set(instance.found_violated_lazy_constraints)
        for idx in tqdm(range(len(instances)),
                        desc="Evaluate (lazy)",
                        disable=not sys.stdout.isatty(),
                        ):
            instance = instances[idx]
            condition_positive = set(instance.found_violated_lazy_constraints)
            condition_negative = all_violations - condition_positive
            pred_positive = set(self.predict(instance)) & all_violations
            pred_negative = all_violations - pred_positive
            tp = len(pred_positive & condition_positive)
            tn = len(pred_negative & condition_negative)
            fp = len(pred_positive & condition_negative)
            fn = len(pred_negative & condition_positive)
            results[idx] = classifier_evaluation_dict(tp, tn, fp, fn)
        return results
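For clarity, a structural sketch of what the component above expects from an Instance: found_violated_lazy_constraints holds hashable identifiers recorded during earlier solves, and build_lazy_constraint maps such an identifier back to a constraint for the current model. The class below is a hypothetical skeleton, not part of this commit; method bodies are elided.

from miplearn import Instance


class MyLazyInstance(Instance):
    """Hypothetical instance whose lazy constraints are keyed by hashable ids."""

    def to_model(self):
        ...  # build and return the Pyomo model

    def get_instance_features(self):
        ...  # 1-D feature array describing the whole instance

    def get_variable_features(self, var, index):
        ...  # feature array for one decision variable

    def build_lazy_constraint(self, model, violation):
        # Rebuild the constraint identified by `violation`; the component passes
        # the returned object to solver.internal_solver.add_constraint(...).
        ...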
84
miplearn/components/objective.py
Normal file
@@ -0,0 +1,84 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from sklearn.metrics import (mean_squared_error, explained_variance_score, max_error,
                             mean_absolute_error, median_absolute_error, r2_score)

from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
from sklearn.linear_model import LinearRegression
from copy import deepcopy
import numpy as np
import logging

logger = logging.getLogger(__name__)


class ObjectiveValueComponent(Component):
    """
    A Component which predicts the optimal objective value of the problem.
    """
    def __init__(self,
                 regressor=LinearRegression()):
        self.ub_regressor = None
        self.lb_regressor = None
        self.regressor_prototype = regressor

    def before_solve(self, solver, instance, model):
        if self.ub_regressor is not None:
            lb, ub = self.predict([instance])[0]
            instance.predicted_ub = ub
            instance.predicted_lb = lb
            logger.info("Predicted objective: [%.2f, %.2f]" % (lb, ub))

    def after_solve(self, solver, instance, model, results):
        if self.ub_regressor is not None:
            results["Predicted UB"] = instance.predicted_ub
            results["Predicted LB"] = instance.predicted_lb
        else:
            results["Predicted UB"] = None
            results["Predicted LB"] = None

    def fit(self, training_instances):
        logger.debug("Extracting features...")
        features = InstanceFeaturesExtractor().extract(training_instances)
        ub = ObjectiveValueExtractor(kind="upper bound").extract(training_instances)
        lb = ObjectiveValueExtractor(kind="lower bound").extract(training_instances)
        assert ub.shape == (len(training_instances), 1)
        assert lb.shape == (len(training_instances), 1)
        self.ub_regressor = deepcopy(self.regressor_prototype)
        self.lb_regressor = deepcopy(self.regressor_prototype)
        logger.debug("Fitting ub_regressor...")
        self.ub_regressor.fit(features, ub.ravel())
        logger.debug("Fitting lb_regressor...")
        self.lb_regressor.fit(features, lb.ravel())

    def predict(self, instances):
        features = InstanceFeaturesExtractor().extract(instances)
        lb = self.lb_regressor.predict(features)
        ub = self.ub_regressor.predict(features)
        assert lb.shape == (len(instances),)
        assert ub.shape == (len(instances),)
        return np.array([lb, ub]).T

    def evaluate(self, instances):
        y_pred = self.predict(instances)
        y_true = np.array([[inst.lower_bound, inst.upper_bound] for inst in instances])
        y_true_lb, y_true_ub = y_true[:, 0], y_true[:, 1]
        y_pred_lb, y_pred_ub = y_pred[:, 0], y_pred[:, 1]
        ev = {
            "Lower bound": {
                "Mean squared error": mean_squared_error(y_true_lb, y_pred_lb),
                "Explained variance": explained_variance_score(y_true_lb, y_pred_lb),
                "Max error": max_error(y_true_lb, y_pred_lb),
                "Mean absolute error": mean_absolute_error(y_true_lb, y_pred_lb),
                "R2": r2_score(y_true_lb, y_pred_lb),
                "Median absolute error": median_absolute_error(y_true_lb, y_pred_lb),
            },
            "Upper bound": {
                "Mean squared error": mean_squared_error(y_true_ub, y_pred_ub),
                "Explained variance": explained_variance_score(y_true_ub, y_pred_ub),
                "Max error": max_error(y_true_ub, y_pred_ub),
                "Mean absolute error": mean_absolute_error(y_true_ub, y_pred_ub),
                "R2": r2_score(y_true_ub, y_pred_ub),
                "Median absolute error": median_absolute_error(y_true_ub, y_pred_ub),
            },
        }
        return ev
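A short sketch of how the component above is exercised, following the pattern of test_objective.py further down in this commit; the printed numbers depend on the training instances and are not reproduced here.

from miplearn import ObjectiveValueComponent
from miplearn.tests import get_test_pyomo_instances  # helper used by the test suite

instances, models = get_test_pyomo_instances()

comp = ObjectiveValueComponent()  # defaults to one LinearRegression per bound
comp.fit(instances)

# One row per instance: (predicted lower bound, predicted upper bound).
print(comp.predict(instances))

# Regression metrics (MSE, explained variance, R2, ...) against recorded bounds.
print(comp.evaluate(instances))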
147
miplearn/components/primal.py
Normal file
@@ -0,0 +1,147 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from copy import deepcopy
import sys

from .component import Component
from ..classifiers.adaptive import AdaptiveClassifier
from ..classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
from ..components import classifier_evaluation_dict
from ..extractors import *

logger = logging.getLogger(__name__)


class PrimalSolutionComponent(Component):
    """
    A component that predicts primal solutions.
    """

    def __init__(self,
                 classifier=AdaptiveClassifier(),
                 mode="exact",
                 threshold=MinPrecisionThreshold(0.98)):
        self.mode = mode
        self.classifiers = {}
        self.thresholds = {}
        self.threshold_prototype = threshold
        self.classifier_prototype = classifier

    def before_solve(self, solver, instance, model):
        solution = self.predict(instance)
        if self.mode == "heuristic":
            solver.internal_solver.fix(solution)
        else:
            solver.internal_solver.set_warm_start(solution)

    def after_solve(self, solver, instance, model, results):
        pass

    def x(self, training_instances):
        return VariableFeaturesExtractor().extract(training_instances)

    def y(self, training_instances):
        return SolutionExtractor().extract(training_instances)

    def fit(self, training_instances, n_jobs=1):
        # Note: n_jobs is currently unused; classifiers are fitted sequentially.
        logger.debug("Extracting features...")
        features = VariableFeaturesExtractor().extract(training_instances)
        solutions = SolutionExtractor().extract(training_instances)

        for category in tqdm(features.keys(),
                             desc="Fit (primal)",
                             disable=not sys.stdout.isatty(),
                             ):
            x_train = features[category]
            for label in [0, 1]:
                y_train = solutions[category][:, label].astype(int)

                # If all samples are either positive or negative, make constant predictions
                y_avg = np.average(y_train)
                if y_avg < 0.001 or y_avg >= 0.999:
                    self.classifiers[category, label] = round(y_avg)
                    self.thresholds[category, label] = 0.50
                    continue

                # Create a copy of classifier prototype and train it
                if isinstance(self.classifier_prototype, list):
                    clf = deepcopy(self.classifier_prototype[label])
                else:
                    clf = deepcopy(self.classifier_prototype)
                clf.fit(x_train, y_train)

                # Find threshold (dynamic or static)
                if isinstance(self.threshold_prototype, DynamicThreshold):
                    self.thresholds[category, label] = self.threshold_prototype.find(clf, x_train, y_train)
                else:
                    self.thresholds[category, label] = deepcopy(self.threshold_prototype)

                self.classifiers[category, label] = clf

    def predict(self, instance):
        solution = {}
        x_test = VariableFeaturesExtractor().extract([instance])
        var_split = Extractor.split_variables(instance)
        for category in var_split.keys():
            n = len(var_split[category])
            for (i, (var, index)) in enumerate(var_split[category]):
                if var not in solution.keys():
                    solution[var] = {}
                solution[var][index] = None
            for label in [0, 1]:
                if (category, label) not in self.classifiers.keys():
                    continue
                clf = self.classifiers[category, label]
                if isinstance(clf, float) or isinstance(clf, int):
                    ws = np.array([[1 - clf, clf] for _ in range(n)])
                else:
                    ws = clf.predict_proba(x_test[category])
                assert ws.shape == (n, 2), "ws.shape should be (%d, 2) not %s" % (n, ws.shape)
                for (i, (var, index)) in enumerate(var_split[category]):
                    if ws[i, 1] >= self.thresholds[category, label]:
                        solution[var][index] = label
        return solution

    def evaluate(self, instances):
        ev = {"Fix zero": {},
              "Fix one": {}}
        for instance_idx in tqdm(range(len(instances)),
                                 desc="Evaluate (primal)",
                                 disable=not sys.stdout.isatty(),
                                 ):
            instance = instances[instance_idx]
            solution_actual = instance.solution
            solution_pred = self.predict(instance)

            vars_all, vars_one, vars_zero = set(), set(), set()
            pred_one_positive, pred_zero_positive = set(), set()
            for (varname, var_dict) in solution_actual.items():
                for (idx, value) in var_dict.items():
                    vars_all.add((varname, idx))
                    if value > 0.5:
                        vars_one.add((varname, idx))
                    else:
                        vars_zero.add((varname, idx))
                    if solution_pred[varname][idx] is not None:
                        if solution_pred[varname][idx] > 0.5:
                            pred_one_positive.add((varname, idx))
                        else:
                            pred_zero_positive.add((varname, idx))
            pred_one_negative = vars_all - pred_one_positive
            pred_zero_negative = vars_all - pred_zero_positive

            tp_zero = len(pred_zero_positive & vars_zero)
            fp_zero = len(pred_zero_positive & vars_one)
            tn_zero = len(pred_zero_negative & vars_one)
            fn_zero = len(pred_zero_negative & vars_zero)

            tp_one = len(pred_one_positive & vars_one)
            fp_one = len(pred_one_positive & vars_zero)
            tn_one = len(pred_one_negative & vars_zero)
            fn_one = len(pred_one_negative & vars_one)

            ev["Fix zero"][instance_idx] = classifier_evaluation_dict(tp_zero, tn_zero, fp_zero, fn_zero)
            ev["Fix one"][instance_idx] = classifier_evaluation_dict(tp_one, tn_one, fp_one, fn_one)
        return ev
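A brief sketch of the component above in use, mirroring test_primal.py further down in this commit; which entries come back as 0, 1 or None depends on the trained classifiers and thresholds.

from miplearn import PrimalSolutionComponent
from miplearn.tests import get_test_pyomo_instances  # helper used by the test suite

instances, models = get_test_pyomo_instances()

# mode="exact" (default) passes predictions as a warm start;
# mode="heuristic" would fix the predicted variables instead.
comp = PrimalSolutionComponent()
comp.fit(instances)

# Maps variable name -> index -> 0, 1 or None (None means "no confident prediction").
solution = comp.predict(instances[0])
print(solution["x"])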
3
miplearn/components/tests/__init__.py
Normal file
@@ -0,0 +1,3 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
31
miplearn/components/tests/test_cuts.py
Normal file
@@ -0,0 +1,31 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
import numpy as np
import pyomo.environ as pe

from miplearn import Instance, GurobiPyomoSolver, LearningSolver
from miplearn.problems.knapsack import ChallengeA


class CutInstance(Instance):
    def to_model(self):
        model = pe.ConcreteModel()
        model.x = x = pe.Var([0, 1], domain=pe.Binary)
        model.OBJ = pe.Objective(expr=x[0] + x[1], sense=pe.maximize)
        model.eq = pe.Constraint(expr=2 * x[0] + 2 * x[1] <= 3)
        return model

    def get_instance_features(self):
        return np.zeros(0)

    def get_variable_features(self, var, index):
        return np.zeros(0)


def test_cut():
    challenge = ChallengeA()
    gurobi = GurobiPyomoSolver()
    solver = LearningSolver(solver=gurobi, time_limit=10)
    solver.solve(challenge.training_instances[0])
    # assert False
140
miplearn/components/tests/test_lazy.py
Normal file
@@ -0,0 +1,140 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn import LazyConstraintsComponent, LearningSolver, InternalSolver
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances
from numpy.linalg import norm

E = 0.1


def test_lazy_fit():
    instances, models = get_test_pyomo_instances()
    instances[0].found_violated_lazy_constraints = ["a", "b"]
    instances[1].found_violated_lazy_constraints = ["b", "c"]
    classifier = Mock(spec=Classifier)
    component = LazyConstraintsComponent(classifier=classifier)

    component.fit(instances)

    # Should create one classifier for each violation
    assert "a" in component.classifiers
    assert "b" in component.classifiers
    assert "c" in component.classifiers

    # Should provide correct x_train to each classifier
    expected_x_train_a = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
    expected_x_train_b = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
    expected_x_train_c = np.array([[67., 21.75, 1287.92], [70., 23.75, 1199.83]])
    actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
    actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
    actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
    assert norm(expected_x_train_a - actual_x_train_a) < E
    assert norm(expected_x_train_b - actual_x_train_b) < E
    assert norm(expected_x_train_c - actual_x_train_c) < E

    # Should provide correct y_train to each classifier
    expected_y_train_a = np.array([1.0, 0.0])
    expected_y_train_b = np.array([1.0, 1.0])
    expected_y_train_c = np.array([0.0, 1.0])
    actual_y_train_a = component.classifiers["a"].fit.call_args[0][1]
    actual_y_train_b = component.classifiers["b"].fit.call_args[0][1]
    actual_y_train_c = component.classifiers["c"].fit.call_args[0][1]
    assert norm(expected_y_train_a - actual_y_train_a) < E
    assert norm(expected_y_train_b - actual_y_train_b) < E
    assert norm(expected_y_train_c - actual_y_train_c) < E


def test_lazy_before():
    instances, models = get_test_pyomo_instances()
    instances[0].build_lazy_constraint = Mock(return_value="c1")
    solver = LearningSolver()
    solver.internal_solver = Mock(spec=InternalSolver)
    component = LazyConstraintsComponent(threshold=0.10)
    component.classifiers = {"a": Mock(spec=Classifier),
                             "b": Mock(spec=Classifier)}
    component.classifiers["a"].predict_proba = Mock(return_value=[[0.95, 0.05]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.02, 0.80]])

    component.before_solve(solver, instances[0], models[0])

    # Should ask classifier likelihood of each constraint being violated
    expected_x_test_a = np.array([[67., 21.75, 1287.92]])
    expected_x_test_b = np.array([[67., 21.75, 1287.92]])
    actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
    actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
    assert norm(expected_x_test_a - actual_x_test_a) < E
    assert norm(expected_x_test_b - actual_x_test_b) < E

    # Should ask instance to generate cut for constraints whose likelihood
    # of being violated exceeds the threshold
    instances[0].build_lazy_constraint.assert_called_once_with(models[0], "b")

    # Should ask internal solver to add generated constraint
    solver.internal_solver.add_constraint.assert_called_once_with("c1")


def test_lazy_evaluate():
    instances, models = get_test_pyomo_instances()
    component = LazyConstraintsComponent()
    component.classifiers = {"a": Mock(spec=Classifier),
                             "b": Mock(spec=Classifier),
                             "c": Mock(spec=Classifier)}
    component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
    component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])

    instances[0].found_violated_lazy_constraints = ["a", "b", "c"]
    instances[1].found_violated_lazy_constraints = ["b", "d"]
    assert component.evaluate(instances) == {
        0: {
            "Accuracy": 0.75,
            "F1 score": 0.8,
            "Precision": 1.0,
            "Recall": 2 / 3.,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 3,
            "Condition negative": 1,
            "False negative": 1,
            "False positive": 0,
            "True negative": 1,
            "True positive": 2,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 75.0,
            "Condition negative (%)": 25.0,
            "False negative (%)": 25.0,
            "False positive (%)": 0,
            "True negative (%)": 25.0,
            "True positive (%)": 50.0,
        },
        1: {
            "Accuracy": 0.5,
            "F1 score": 0.5,
            "Precision": 0.5,
            "Recall": 0.5,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 2,
            "Condition negative": 2,
            "False negative": 1,
            "False positive": 1,
            "True negative": 1,
            "True positive": 1,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 50.0,
            "Condition negative (%)": 50.0,
            "False negative (%)": 25.0,
            "False positive (%)": 25.0,
            "True negative (%)": 25.0,
            "True positive (%)": 25.0,
        }
    }
47
miplearn/components/tests/test_objective.py
Normal file
@@ -0,0 +1,47 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn import ObjectiveValueComponent
from miplearn.classifiers import Regressor
from miplearn.tests import get_test_pyomo_instances


def test_usage():
    instances, models = get_test_pyomo_instances()
    comp = ObjectiveValueComponent()
    comp.fit(instances)
    assert instances[0].lower_bound == 1183.0
    assert instances[0].upper_bound == 1183.0
    assert np.round(comp.predict(instances), 2).tolist() == [[1183.0, 1183.0],
                                                             [1070.0, 1070.0]]


def test_obj_evaluate():
    instances, models = get_test_pyomo_instances()
    reg = Mock(spec=Regressor)
    reg.predict = Mock(return_value=np.array([1000.0, 1000.0]))
    comp = ObjectiveValueComponent(regressor=reg)
    comp.fit(instances)
    ev = comp.evaluate(instances)
    assert ev == {
        'Lower bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        },
        'Upper bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        }
    }
99
miplearn/components/tests/test_primal.py
Normal file
@@ -0,0 +1,99 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from unittest.mock import Mock

import numpy as np
from miplearn import PrimalSolutionComponent
from miplearn.classifiers import Classifier
from miplearn.tests import get_test_pyomo_instances


def test_predict():
    instances, models = get_test_pyomo_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances)
    solution = comp.predict(instances[0])
    assert "x" in solution
    assert 0 in solution["x"]
    assert 1 in solution["x"]
    assert 2 in solution["x"]
    assert 3 in solution["x"]


def test_evaluate():
    instances, models = get_test_pyomo_instances()
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(return_value=np.array([
        [0., 1.],  # x[0]
        [0., 1.],  # x[1]
        [1., 0.],  # x[2]
        [1., 0.],  # x[3]
    ]))
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(return_value=np.array([
        [1., 0.],  # x[0] instances[0]
        [1., 0.],  # x[1] instances[0]
        [0., 1.],  # x[2] instances[0]
        [1., 0.],  # x[3] instances[0]
    ]))
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
                                   threshold=0.50)
    comp.fit(instances[:1])
    assert comp.predict(instances[0]) == {"x": {0: 0,
                                                1: 0,
                                                2: 1,
                                                3: None}}
    assert instances[0].solution == {"x": {0: 1,
                                           1: 0,
                                           2: 1,
                                           3: 1}}
    ev = comp.evaluate(instances[:1])
    assert ev == {'Fix one': {0: {'Accuracy': 0.5,
                                  'Condition negative': 1,
                                  'Condition negative (%)': 25.0,
                                  'Condition positive': 3,
                                  'Condition positive (%)': 75.0,
                                  'F1 score': 0.5,
                                  'False negative': 2,
                                  'False negative (%)': 50.0,
                                  'False positive': 0,
                                  'False positive (%)': 0.0,
                                  'Precision': 1.0,
                                  'Predicted negative': 3,
                                  'Predicted negative (%)': 75.0,
                                  'Predicted positive': 1,
                                  'Predicted positive (%)': 25.0,
                                  'Recall': 0.3333333333333333,
                                  'True negative': 1,
                                  'True negative (%)': 25.0,
                                  'True positive': 1,
                                  'True positive (%)': 25.0}},
                  'Fix zero': {0: {'Accuracy': 0.75,
                                   'Condition negative': 3,
                                   'Condition negative (%)': 75.0,
                                   'Condition positive': 1,
                                   'Condition positive (%)': 25.0,
                                   'F1 score': 0.6666666666666666,
                                   'False negative': 0,
                                   'False negative (%)': 0.0,
                                   'False positive': 1,
                                   'False positive (%)': 25.0,
                                   'Precision': 0.5,
                                   'Predicted negative': 2,
                                   'Predicted negative (%)': 50.0,
                                   'Predicted positive': 2,
                                   'Predicted positive (%)': 50.0,
                                   'Recall': 1.0,
                                   'True negative': 2,
                                   'True negative (%)': 50.0,
                                   'True positive': 1,
                                   'True positive (%)': 25.0}}}


def test_primal_parallel_fit():
    instances, models = get_test_pyomo_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances, n_jobs=2)
    assert len(comp.classifiers) == 2