Reorganize directories
@@ -1,19 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .extractors import (SolutionExtractor,
                         InstanceFeaturesExtractor,
                         ObjectiveValueExtractor,
                         VariableFeaturesExtractor,
                         )
from .components.component import Component
from .components.objective import ObjectiveValueComponent
from .components.lazy import LazyConstraintsComponent
from .components.primal import (PrimalSolutionComponent,
                                AdaptivePredictor,
                                )
from .components.branching import BranchPriorityComponent
from .benchmark import BenchmarkRunner
from .instance import Instance
from .solvers import LearningSolver
@@ -1,101 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .solvers import LearningSolver
from copy import deepcopy
import pandas as pd
from tqdm.auto import tqdm

class BenchmarkRunner:
    def __init__(self, solvers):
        assert isinstance(solvers, dict)
        for solver in solvers.values():
            assert isinstance(solver, LearningSolver)
        self.solvers = solvers
        self.results = None

    def solve(self, instances, fit=True, tee=False):
        for (name, solver) in self.solvers.items():
            for i in tqdm(range(len(instances))):
                results = solver.solve(deepcopy(instances[i]), tee=tee)
                self._push_result(results, solver=solver, name=name, instance=i)
                if fit:
                    solver.fit()

    def parallel_solve(self, instances, n_jobs=1, n_trials=1):
        instances = instances * n_trials
        for (name, solver) in self.solvers.items():
            results = solver.parallel_solve(instances,
                                            n_jobs=n_jobs,
                                            label="Solve (%s)" % name,
                                            collect_training_data=False)
            for i in range(len(instances)):
                self._push_result(results[i],
                                  solver=solver,
                                  name=name,
                                  instance=i)

    def raw_results(self):
        return self.results

    def save_results(self, filename):
        self.results.to_csv(filename)

    def load_results(self, filename):
        self.results = pd.read_csv(filename, index_col=0)

    def load_state(self, filename):
        for (name, solver) in self.solvers.items():
            solver.load_state(filename)

    def fit(self, training_instances):
        for (name, solver) in self.solvers.items():
            solver.fit(training_instances)

    def _push_result(self, result, solver, name, instance):
        if self.results is None:
            self.results = pd.DataFrame(columns=["Solver",
                                                 "Instance",
                                                 "Wallclock Time",
                                                 "Lower Bound",
                                                 "Upper Bound",
                                                 "Gap",
                                                 "Nodes",
                                                 "Mode",
                                                 "Sense",
                                                 "Predicted LB",
                                                 "Predicted UB",
                                                 ])
        lb = result["Lower bound"]
        ub = result["Upper bound"]
        gap = (ub - lb) / lb
        self.results = self.results.append({
            "Solver": name,
            "Instance": instance,
            "Wallclock Time": result["Wallclock time"],
            "Lower Bound": lb,
            "Upper Bound": ub,
            "Gap": gap,
            "Nodes": result["Nodes"],
            "Mode": solver.mode,
            "Sense": result["Sense"],
            "Predicted LB": result["Predicted LB"],
            "Predicted UB": result["Predicted UB"],
        }, ignore_index=True)
        groups = self.results.groupby("Instance")
        best_lower_bound = groups["Lower Bound"].transform("max")
        best_upper_bound = groups["Upper Bound"].transform("min")
        best_gap = groups["Gap"].transform("min")
        best_nodes = groups["Nodes"].transform("min")
        best_wallclock_time = groups["Wallclock Time"].transform("min")
        self.results["Relative Lower Bound"] = \
            self.results["Lower Bound"] / best_lower_bound
        self.results["Relative Upper Bound"] = \
            self.results["Upper Bound"] / best_upper_bound
        self.results["Relative Wallclock Time"] = \
            self.results["Wallclock Time"] / best_wallclock_time
        self.results["Relative Gap"] = \
            self.results["Gap"] / best_gap
        self.results["Relative Nodes"] = \
            self.results["Nodes"] / best_nodes
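For context, a minimal usage sketch of BenchmarkRunner (not part of the commit; the two solver names and the `instances` list are hypothetical):

# Benchmark two LearningSolver configurations on the same instances; results
# land in a pandas DataFrame including the relative metrics computed above.
from miplearn import BenchmarkRunner, LearningSolver

runner = BenchmarkRunner({
    "baseline": LearningSolver(),
    "ml-exact": LearningSolver(),  # assumed to have been fitted beforehand
})
runner.parallel_solve(instances, n_jobs=4)  # `instances`: list of Instance objects
runner.save_results("benchmark.csv")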
@@ -1,4 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

@@ -1,70 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import Base.Threads.@threads
using TinyBnB, CPLEXW, Printf

instance_name = ARGS[1]
output_filename = ARGS[2]
node_limit = parse(Int, ARGS[3])

mip = open_mip(instance_name)
n_vars = CPXgetnumcols(mip.cplex_env[1], mip.cplex_lp[1])

pseudocost_count_up = [0 for i in 1:n_vars]
pseudocost_count_down = [0 for i in 1:n_vars]
pseudocost_sum_up = [0. for i in 1:n_vars]
pseudocost_sum_down = [0. for i in 1:n_vars]

function full_strong_branching_track(node::Node, progress::Progress)::TinyBnB.Variable
    N = length(node.fractional_variables)
    scores = Array{Float64}(undef, N)
    rates_up = Array{Float64}(undef, N)
    rates_down = Array{Float64}(undef, N)

    @threads for v in 1:N
        fix_vars!(node.mip, node.branch_variables, node.branch_values)
        obj_up, obj_down = TinyBnB.probe(node.mip, node.fractional_variables[v])
        unfix_vars!(node.mip, node.branch_variables)
        delta_up = obj_up - node.obj
        delta_down = obj_down - node.obj
        frac_up = ceil(node.fractional_values[v]) - node.fractional_values[v]
        frac_down = node.fractional_values[v] - floor(node.fractional_values[v])
        rates_up[v] = delta_up / frac_up
        rates_down[v] = delta_down / frac_down
        scores[v] = delta_up * delta_down
    end

    max_score, max_offset = findmax(scores)
    selected_var = node.fractional_variables[max_offset]

    if abs(rates_up[max_offset]) < 1e6
        pseudocost_count_up[selected_var.index] += 1
        pseudocost_sum_up[selected_var.index] += rates_up[max_offset]
    end

    if abs(rates_down[max_offset]) < 1e6
        pseudocost_count_down[selected_var.index] += 1
        pseudocost_sum_down[selected_var.index] += rates_down[max_offset]
    end

    return selected_var
end

branch_and_bound(mip,
                 node_limit = node_limit,
                 branch_rule = full_strong_branching_track,
                 node_rule = best_bound,
                 print_interval = 100)

priority = [(pseudocost_count_up[v] == 0 || pseudocost_count_down[v] == 0) ? 0 :
            (pseudocost_sum_up[v] / pseudocost_count_up[v]) *
            (pseudocost_sum_down[v] / pseudocost_count_down[v])
            for v in 1:n_vars];

open(output_filename, "w") do file
    for var in mip.binary_variables
        write(file, @sprintf("%s,%.0f\n", name(mip, var), priority[var.index]))
    end
end
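For context, a small worked example (hypothetical numbers, not part of the commit) of the priority formula at the end of the script above, in Python:

# priority[v] is the product of the average up and down pseudocost rates
# recorded whenever v was selected by strong branching.
count_up, sum_up = 4, 32.0      # v branched up 4 times, rates summing to 32
count_down, sum_down = 2, 10.0  # v branched down twice, rates summing to 10
priority = (sum_up / count_up) * (sum_down / count_down)
print(priority)  # 40.0, written to the priority file rounded to "%.0f"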
@@ -1,146 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .component import Component
from ..extractors import Extractor
from abc import ABC, abstractmethod
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
from tqdm.auto import tqdm
from joblib import Parallel, delayed
import multiprocessing

def _default_branch_priority_predictor():
    return KNeighborsRegressor(n_neighbors=1)


class BranchPriorityComponent(Component):
    def __init__(self,
                 node_limit=10_000,
                 predictor=_default_branch_priority_predictor,
                 ):
        self.pending_instances = []
        self.x_train = {}
        self.y_train = {}
        self.predictors = {}
        self.node_limit = node_limit
        self.predictor_factory = predictor

    def before_solve(self, solver, instance, model):
        assert solver.internal_solver.name == "gurobi_persistent", "Only GurobiPersistent is currently supported"
        from gurobipy import GRB
        var_split = Extractor.split_variables(instance, model)
        for category in var_split.keys():
            if category not in self.predictors.keys():
                continue
            var_index_pairs = var_split[category]
            for (i, (var, index)) in enumerate(var_index_pairs):
                x = self._build_x(instance, var, index)
                y = self.predictors[category].predict([x])[0][0]
                gvar = solver.internal_solver._pyomo_var_to_solver_var_map[var[index]]
                gvar.setAttr(GRB.Attr.BranchPriority, int(round(y)))

    def after_solve(self, solver, instance, model):
        self.pending_instances += [instance]

    def fit(self, solver, n_jobs=1):
        def _process(instance):
            # Create LP file
            import subprocess, tempfile, os, sys
            lp_file = tempfile.NamedTemporaryFile(suffix=".lp")
            priority_file = tempfile.NamedTemporaryFile()
            model = instance.to_model()
            model.write(lp_file.name)

            # Run Julia script
            src_dirname = os.path.dirname(os.path.realpath(__file__))
            priority_file = tempfile.NamedTemporaryFile(mode="r")
            subprocess.run(["julia",
                            "%s/branching.jl" % src_dirname,
                            lp_file.name,
                            priority_file.name,
                            str(self.node_limit),
                            ],
                           check=True,
                           )

            # Parse output
            tokens = [line.strip().split(",") for line in priority_file.readlines()]
            lp_varname_to_priority = {t[0]: int(t[1]) for t in tokens}

            # Map priorities back to Pyomo variables
            pyomo_var_to_priority = {}
            from pyomo.core import Var
            from pyomo.core.base.label import TextLabeler
            labeler = TextLabeler()
            symbol_map = list(model.solutions.symbol_map.values())[0]

            # Build x_train and y_train
            comp = BranchPriorityComponent()
            for var in model.component_objects(Var):
                for index in var:
                    category = instance.get_variable_category(var, index)
                    if category is None:
                        continue
                    lp_varname = symbol_map.getSymbol(var[index], labeler)
                    var_priority = lp_varname_to_priority[lp_varname]
                    x = self._build_x(instance, var, index)
                    y = np.array([var_priority])

                    if category not in comp.x_train.keys():
                        comp.x_train[category] = np.array([x])
                        comp.y_train[category] = np.array([y])
                    else:
                        comp.x_train[category] = np.vstack([comp.x_train[category], x])
                        comp.y_train[category] = np.vstack([comp.y_train[category], y])

            return comp

        # Run strong branching on pending instances
        subcomponents = Parallel(n_jobs=n_jobs)(
            delayed(_process)(instance)
            for instance in tqdm(self.pending_instances, desc="Branch priority")
        )
        self.merge(subcomponents)
        self.pending_instances.clear()

        # Retrain ML predictors
        for category in self.x_train.keys():
            x_train = self.x_train[category]
            y_train = self.y_train[category]
            self.predictors[category] = self.predictor_factory()
            self.predictors[category].fit(x_train, y_train)

    def _build_x(self, instance, var, index):
        instance_features = instance.get_instance_features()
        var_features = instance.get_variable_features(var, index)
        return np.hstack([instance_features, var_features])

    def merge(self, other_components):
        keys = set(self.x_train.keys())
        for comp in other_components:
            self.pending_instances += comp.pending_instances
            keys = keys.union(set(comp.x_train.keys()))

        # Merge x_train and y_train
        for key in keys:
            x_train_submatrices = [comp.x_train[key]
                                   for comp in other_components
                                   if key in comp.x_train.keys()]
            y_train_submatrices = [comp.y_train[key]
                                   for comp in other_components
                                   if key in comp.y_train.keys()]
            if key in self.x_train.keys():
                x_train_submatrices += [self.x_train[key]]
                y_train_submatrices += [self.y_train[key]]
            self.x_train[key] = np.vstack(x_train_submatrices)
            self.y_train[key] = np.vstack(y_train_submatrices)

        # Merge trained ML predictors
        for comp in other_components:
            for key in comp.predictors.keys():
                if key not in self.predictors.keys():
                    self.predictors[key] = comp.predictors[key]
@@ -1,23 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import ABC, abstractmethod


class Component(ABC):
    """
    A Component is an object which adds functionality to a LearningSolver.
    """

    @abstractmethod
    def before_solve(self, solver, instance, model):
        pass

    @abstractmethod
    def after_solve(self, solver, instance, model, results):
        pass

    @abstractmethod
    def fit(self, training_instances):
        pass
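For context, a minimal sketch of a custom component (hypothetical, not part of the commit); implementing the three hooks above is all that is required:

class CountingComponent(Component):
    """Hypothetical component that counts how many instances were solved."""

    def __init__(self):
        self.n_solved = 0

    def before_solve(self, solver, instance, model):
        pass  # could inject warm starts, constraints or branch priorities here

    def after_solve(self, solver, instance, model, results):
        self.n_solved += 1  # collect training data from the solved instance

    def fit(self, training_instances):
        pass  # train ML models on the collected data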
@@ -1,59 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .component import Component
from ..extractors import *

from abc import ABC, abstractmethod
from copy import deepcopy
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve
from sklearn.neighbors import KNeighborsClassifier
from tqdm.auto import tqdm
import pyomo.environ as pe
import logging
logger = logging.getLogger(__name__)


class LazyConstraintsComponent(Component):
    """
    A component that predicts which lazy constraints to enforce.
    """

    def __init__(self,
                 threshold=0.05):
        self.violations = set()
        self.count = {}
        self.n_samples = 0
        self.threshold = threshold

    def before_solve(self, solver, instance, model):
        logger.info("Enforcing %d lazy constraints" % len(self.violations))
        for v in self.violations:
            if self.count[v] < self.n_samples * self.threshold:
                continue
            cut = instance.build_lazy_constraint(model, v)
            solver.internal_solver.add_constraint(cut)

    def after_solve(self, solver, instance, model, results):
        pass

    def fit(self, training_instances):
        logger.debug("Fitting...")
        self.n_samples = len(training_instances)
        for instance in training_instances:
            if not hasattr(instance, "found_violations"):
                continue
            for v in instance.found_violations:
                self.violations.add(v)
                if v not in self.count.keys():
                    self.count[v] = 0
                self.count[v] += 1

    def predict(self, instance, model=None):
        return self.violations
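For context, a small worked example (hypothetical counts, not part of the commit) of the frequency filter applied in before_solve:

# A violation is enforced up front only if it appeared in at least
# threshold * n_samples of the training instances.
n_samples, threshold = 100, 0.05
count = {"subtour_A": 12, "subtour_B": 3}
enforced = [v for v, c in count.items() if c >= n_samples * threshold]
print(enforced)  # ['subtour_A']; 'subtour_B' was seen in only 3 of 100 samples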
@@ -1,54 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .. import Component, InstanceFeaturesExtractor, ObjectiveValueExtractor
from sklearn.linear_model import LinearRegression
from copy import deepcopy
import numpy as np
import logging
logger = logging.getLogger(__name__)


class ObjectiveValueComponent(Component):
    """
    A Component which predicts the optimal objective value of the problem.
    """
    def __init__(self,
                 regressor=LinearRegression()):
        self.ub_regressor = None
        self.lb_regressor = None
        self.regressor_prototype = regressor

    def before_solve(self, solver, instance, model):
        if self.ub_regressor is not None:
            lb, ub = self.predict([instance])[0]
            instance.predicted_ub = ub
            instance.predicted_lb = lb
            logger.info("Predicted objective: [%.2f, %.2f]" % (lb, ub))

    def after_solve(self, solver, instance, model, results):
        if self.ub_regressor is not None:
            results["Predicted UB"] = instance.predicted_ub
            results["Predicted LB"] = instance.predicted_lb
        else:
            results["Predicted UB"] = None
            results["Predicted LB"] = None

    def fit(self, training_instances):
        logger.debug("Extracting features...")
        features = InstanceFeaturesExtractor().extract(training_instances)
        ub = ObjectiveValueExtractor(kind="upper bound").extract(training_instances)
        lb = ObjectiveValueExtractor(kind="lower bound").extract(training_instances)
        self.ub_regressor = deepcopy(self.regressor_prototype)
        self.lb_regressor = deepcopy(self.regressor_prototype)
        logger.debug("Fitting ub_regressor...")
        self.ub_regressor.fit(features, ub)
        logger.debug("Fitting lb_regressor...")
        self.lb_regressor.fit(features, lb)

    def predict(self, instances):
        features = InstanceFeaturesExtractor().extract(instances)
        lb = self.lb_regressor.predict(features)
        ub = self.ub_regressor.predict(features)
        return np.hstack([lb, ub])
@@ -1,201 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from .component import Component
from ..extractors import *

from abc import ABC, abstractmethod
from copy import deepcopy
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve
from sklearn.neighbors import KNeighborsClassifier
from tqdm.auto import tqdm
import pyomo.environ as pe
import logging
logger = logging.getLogger(__name__)


class AdaptivePredictor:
    def __init__(self,
                 predictor=None,
                 min_samples_predict=1,
                 min_samples_cv=100,
                 thr_fix=0.999,
                 thr_alpha=0.50,
                 thr_balance=0.95,
                 ):
        self.min_samples_predict = min_samples_predict
        self.min_samples_cv = min_samples_cv
        self.thr_fix = thr_fix
        self.thr_alpha = thr_alpha
        self.thr_balance = thr_balance
        self.predictor_factory = predictor

    def fit(self, x_train, y_train):
        n_samples = x_train.shape[0]

        # If number of samples is too small, don't predict anything.
        if n_samples < self.min_samples_predict:
            logger.debug("  Too few samples (%d); always predicting false" % n_samples)
            self.predictor = 0
            return

        # If vast majority of observations are false, always return false.
        y_train_avg = np.average(y_train)
        if y_train_avg <= 1.0 - self.thr_fix:
            logger.debug("  Most samples are negative (%.3f); always returning false" % y_train_avg)
            self.predictor = 0
            return

        # If vast majority of observations are true, always return true.
        if y_train_avg >= self.thr_fix:
            logger.debug("  Most samples are positive (%.3f); always returning true" % y_train_avg)
            self.predictor = 1
            return

        # If classes are too unbalanced, don't predict anything.
        if y_train_avg < (1 - self.thr_balance) or y_train_avg > self.thr_balance:
            logger.debug("  Classes are too unbalanced (%.3f); always returning false" % y_train_avg)
            self.predictor = 0
            return

        # Select ML model if none is provided
        if self.predictor_factory is None:
            if n_samples < 30:
                self.predictor_factory = KNeighborsClassifier(n_neighbors=n_samples)
            else:
                self.predictor_factory = make_pipeline(StandardScaler(), LogisticRegression())

        # Create predictor
        if callable(self.predictor_factory):
            pred = self.predictor_factory()
        else:
            pred = deepcopy(self.predictor_factory)

        # Skip cross-validation if number of samples is too small
        if n_samples < self.min_samples_cv:
            logger.debug("  Too few samples (%d); skipping cross validation" % n_samples)
            self.predictor = pred
            self.predictor.fit(x_train, y_train)
            return

        # Calculate cross-validation score
        cv_score = np.mean(cross_val_score(pred, x_train, y_train, cv=5))
        dummy_score = max(y_train_avg, 1 - y_train_avg)
        cv_thr = 1. * self.thr_alpha + dummy_score * (1 - self.thr_alpha)

        # If cross-validation score is too low, don't predict anything.
        if cv_score < cv_thr:
            logger.debug("  Score is too low (%.3f < %.3f); always returning false" % (cv_score, cv_thr))
            self.predictor = 0
        else:
            logger.debug("  Score is acceptable (%.3f > %.3f); training classifier" % (cv_score, cv_thr))
            self.predictor = pred
            self.predictor.fit(x_train, y_train)

    def predict_proba(self, x_test):
        if isinstance(self.predictor, int):
            y_pred = np.zeros((x_test.shape[0], 2))
            y_pred[:, self.predictor] = 1.0
            return y_pred
        else:
            return self.predictor.predict_proba(x_test)
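For context, a minimal sketch (hypothetical data, not part of the commit) of the fallback behavior above: with fewer than min_samples_predict observations, the predictor degenerates to a constant answer.

import numpy as np

pred = AdaptivePredictor(min_samples_predict=10)
x_train = np.random.rand(5, 3)   # only 5 samples: below the minimum
y_train = np.array([1, 0, 1, 1, 0])
pred.fit(x_train, y_train)       # sets self.predictor = 0
print(pred.predict_proba(np.random.rand(2, 3)))  # [[1. 0.] [1. 0.]]: always false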
class PrimalSolutionComponent(Component):
    """
    A component that predicts primal solutions.
    """

    def __init__(self,
                 predictor=AdaptivePredictor(),
                 mode="exact",
                 max_fpr=[1e-3, 1e-3],
                 min_threshold=[0.75, 0.75],
                 dynamic_thresholds=True,
                 ):
        self.mode = mode
        self.predictors = {}
        self.is_warm_start_available = False
        self.max_fpr = max_fpr
        self.min_threshold = min_threshold
        self.thresholds = {}
        self.predictor_factory = predictor
        self.dynamic_thresholds = dynamic_thresholds

    def before_solve(self, solver, instance, model):
        solution = self.predict(instance)
        if self.mode == "heuristic":
            solver.internal_solver.fix(solution)
        else:
            solver.internal_solver.set_warm_start(solution)

    def after_solve(self, solver, instance, model, results):
        pass

    def fit(self, training_instances):
        logger.debug("Extracting features...")
        features = VariableFeaturesExtractor().extract(training_instances)
        solutions = SolutionExtractor().extract(training_instances)

        for category in tqdm(features.keys(), desc="Fit (Primal)"):
            x_train = features[category]
            y_train = solutions[category]
            for label in [0, 1]:
                logger.debug("Fitting predictors[%s, %s]:" % (category, label))

                if callable(self.predictor_factory):
                    pred = self.predictor_factory(category, label)
                else:
                    pred = deepcopy(self.predictor_factory)
                self.predictors[category, label] = pred
                y = y_train[:, label].astype(int)
                pred.fit(x_train, y)

                # If y is either always one or always zero, set fixed threshold
                y_avg = np.average(y)
                if (not self.dynamic_thresholds) or y_avg <= 0.001 or y_avg >= 0.999:
                    self.thresholds[category, label] = self.min_threshold[label]
                    logger.debug("    Setting threshold to %.4f" % self.min_threshold[label])
                    continue

                # Calculate threshold dynamically using ROC curve
                y_scores = pred.predict_proba(x_train)[:, 1]
                fpr, tpr, thresholds = roc_curve(y, y_scores)
                k = 0
                while True:
                    # Stop before reading past the end of the ROC arrays
                    if (k + 1) >= len(fpr):
                        break
                    if fpr[k + 1] > self.max_fpr[label]:
                        break
                    if thresholds[k + 1] < self.min_threshold[label]:
                        break
                    k = k + 1
                logger.debug("    Setting threshold to %.4f (fpr=%.4f, tpr=%.4f)" %
                             (thresholds[k], fpr[k], tpr[k]))
                self.thresholds[category, label] = thresholds[k]

    def predict(self, instance):
        x_test = VariableFeaturesExtractor().extract([instance])
        solution = {}
        var_split = Extractor.split_variables(instance)
        for category in var_split.keys():
            for (i, (var, index)) in enumerate(var_split[category]):
                if var not in solution.keys():
                    solution[var] = {}
                solution[var][index] = None
                for label in [0, 1]:
                    if (category, label) not in self.predictors.keys():
                        continue
                    ws = self.predictors[category, label].predict_proba(x_test[category])
                    logger.debug("%s[%s] ws=%.6f threshold=%.6f" %
                                 (var, index, ws[i, 1], self.thresholds[category, label]))
                    if ws[i, 1] >= self.thresholds[category, label]:
                        solution[var][index] = label
        return solution
@@ -1,4 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

@@ -1,49 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import BranchPriorityComponent, LearningSolver
from miplearn.problems.knapsack import KnapsackInstance
import numpy as np
import tempfile

def _get_instances():
    return [
        KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        ),
    ] * 2


def test_branching():
    instances = _get_instances()
    component = BranchPriorityComponent()
    for instance in instances:
        component.after_solve(None, instance, None)
    component.fit(None)
    for key in ["default"]:
        assert key in component.x_train.keys()
        assert key in component.y_train.keys()
        assert component.x_train[key].shape == (8, 4)
        assert component.y_train[key].shape == (8, 1)


# def test_branch_priority_save_load():
#     state_file = tempfile.NamedTemporaryFile(mode="r")
#     solver = LearningSolver(components={"branch-priority": BranchPriorityComponent()})
#     solver.parallel_solve(_get_instances(), n_jobs=2)
#     solver.fit()
#     comp = solver.components["branch-priority"]
#     assert comp.x_train["default"].shape == (8, 4)
#     assert comp.y_train["default"].shape == (8, 1)
#     assert "default" in comp.predictors.keys()
#     solver.save_state(state_file.name)
#
#     solver = LearningSolver(components={"branch-priority": BranchPriorityComponent()})
#     solver.load_state(state_file.name)
#     comp = solver.components["branch-priority"]
#     assert comp.x_train["default"].shape == (8, 4)
#     assert comp.y_train["default"].shape == (8, 1)
#     assert "default" in comp.predictors.keys()
@@ -1,29 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import ObjectiveValueComponent, LearningSolver
from miplearn.problems.knapsack import KnapsackInstance

def _get_instances():
    instances = [
        KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for i in range(len(instances)):
        solver.solve(instances[i], models[i])
    return instances, models


def test_usage():
    instances, models = _get_instances()
    comp = ObjectiveValueComponent()
    comp.fit(instances)
    assert instances[0].lower_bound == 1183.0
    assert instances[0].upper_bound == 1183.0
    assert comp.predict(instances).tolist() == [[1183.0, 1183.0]]
@@ -1,33 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver, PrimalSolutionComponent
from miplearn.problems.knapsack import KnapsackInstance
import numpy as np
import tempfile


def _get_instances():
    instances = [
        KnapsackInstance(
            weights=[23., 26., 20., 18.],
            prices=[505., 352., 458., 220.],
            capacity=67.,
        ),
    ] * 5
    models = [inst.to_model() for inst in instances]
    solver = LearningSolver()
    for i in range(len(instances)):
        solver.solve(instances[i], models[i])
    return instances, models


def test_predict():
    instances, models = _get_instances()
    comp = PrimalSolutionComponent()
    comp.fit(instances)
    solution = comp.predict(instances[0])
    assert "x" in solution
    for idx in range(4):
        assert idx in solution["x"]
@@ -1,105 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
from abc import ABC, abstractmethod
from pyomo.core import Var
from tqdm.auto import tqdm, trange
from p_tqdm import p_map
import logging
logger = logging.getLogger(__name__)


class Extractor(ABC):
    @abstractmethod
    def extract(self, instances, models):
        pass

    @staticmethod
    def split_variables(instance):
        assert hasattr(instance, "lp_solution")
        result = {}
        for var_name in instance.lp_solution:
            for index in instance.lp_solution[var_name]:
                category = instance.get_variable_category(var_name, index)
                if category is None:
                    continue
                if category not in result:
                    result[category] = []
                result[category] += [(var_name, index)]
        return result


class VariableFeaturesExtractor(Extractor):
    def extract(self, instances):
        result = {}
        for instance in tqdm(instances,
                             desc="Extract var features",
                             disable=len(instances) < 5):
            instance_features = instance.get_instance_features()
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
                if category not in result:
                    result[category] = []
                for (var_name, index) in var_index_pairs:
                    result[category] += [
                        instance_features.tolist() + \
                        instance.get_variable_features(var_name, index).tolist() + \
                        [instance.lp_solution[var_name][index]]
                    ]
        for category in result:
            result[category] = np.array(result[category])
        return result


class SolutionExtractor(Extractor):
    def __init__(self, relaxation=False):
        self.relaxation = relaxation

    def extract(self, instances):
        result = {}
        for instance in tqdm(instances,
                             desc="Extract solution",
                             disable=len(instances) < 5):
            var_split = self.split_variables(instance)
            for (category, var_index_pairs) in var_split.items():
                if category not in result:
                    result[category] = []
                for (var_name, index) in var_index_pairs:
                    if self.relaxation:
                        v = instance.lp_solution[var_name][index]
                    else:
                        v = instance.solution[var_name][index]
                    if v is None:
                        result[category] += [[0, 0]]
                    else:
                        result[category] += [[1 - v, v]]
        for category in result:
            result[category] = np.array(result[category])
        return result


class InstanceFeaturesExtractor(Extractor):
    def extract(self, instances, models=None):
        return np.vstack([
            np.hstack([
                instance.get_instance_features(),
                instance.lp_value,
            ])
            for instance in instances
        ])


class ObjectiveValueExtractor(Extractor):
    def __init__(self, kind="lp"):
        assert kind in ["lower bound", "upper bound", "lp"]
        self.kind = kind

    def extract(self, instances, models=None):
        if self.kind == "lower bound":
            return np.array([[instance.lower_bound] for instance in instances])
        if self.kind == "upper bound":
            return np.array([[instance.upper_bound] for instance in instances])
        if self.kind == "lp":
            return np.array([[instance.lp_value] for instance in instances])
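For context, a minimal sketch (hypothetical stub instance, not part of the commit) of what split_variables returns:

# Variables are grouped by category, as (variable name, index) pairs.
class _StubInstance:
    lp_solution = {"x": {0: 0.5, 1: 1.0}}

    def get_variable_category(self, var_name, index):
        return "default"

print(Extractor.split_variables(_StubInstance()))
# {'default': [('x', 0), ('x', 1)]}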
@@ -1,114 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from abc import ABC, abstractmethod


class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the problem.

    In the knapsack problem, for example, this class could hold the number of items, their weights
    and costs, as well as the size of the knapsack. Objects implementing this class are able to
    convert themselves into a concrete optimization model, which can be optimized by a solver, or
    into arrays of features, which can be provided as inputs to machine learning models.
    """

    @abstractmethod
    def to_model(self):
        """
        Returns a concrete Pyomo model corresponding to this instance.
        """
        pass

    @abstractmethod
    def get_instance_features(self):
        """
        Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.

        The array is used by LearningSolver to determine how similar two instances are. It may also
        be used to predict, in combination with variable-specific features, the values of binary
        decision variables in the problem.

        There is not necessarily a one-to-one correspondence between models and instance features:
        the features may encode only part of the data necessary to generate the complete model.
        Features may also be statistics computed from the original data. For example, in the
        knapsack problem, an implementation may decide to provide as instance features only
        the average weights, average prices, number of items and the size of the knapsack.

        The returned array MUST have the same length for all relevant instances of the problem. If
        two instances map into arrays of different lengths, they cannot be solved by the same
        LearningSolver object.
        """
        pass

    @abstractmethod
    def get_variable_features(self, var, index):
        """
        Returns a 1-dimensional array of (numerical) features describing a particular decision
        variable.

        The argument `var` is a pyomo.core.Var object, which represents a collection of decision
        variables. The argument `index` specifies which variable in the collection is the relevant
        one.

        In combination with instance features, variable features are used by LearningSolver to
        predict, among other things, the optimal value of each decision variable before the
        optimization takes place. In the knapsack problem, for example, an implementation could
        provide as variable features the weight and the price of a specific item.

        Like instance features, the arrays returned by this method MUST have the same length for
        all variables within the same category, for all relevant instances of the problem.
        """
        pass

    def get_variable_category(self, var, index):
        """
        Returns the category (a string, an integer or any hashable type) for each decision
        variable.

        If two variables have the same category, LearningSolver will use the same internal ML
        model to predict the values of both variables. By default, all variables belong to the
        "default" category, and therefore only one ML model is used for all variables.

        If the returned category is None, ML models will ignore the variable.
        """
        return "default"

    def find_violations(self, model):
        """
        Returns lazy constraint violations found for the current solution.

        After solving a model, LearningSolver will ask the instance to identify which lazy
        constraints are violated by the current solution. For each identified violation,
        LearningSolver will then call build_lazy_constraint, add the generated Pyomo
        constraint to the model, then resolve the problem. The process repeats until no further
        lazy constraint violations are found.

        Each "violation" is simply a string, a tuple or any other hashable type which allows the
        instance to identify unambiguously which lazy constraint should be generated. In the
        Traveling Salesman Problem, for example, a subtour violation could be a frozen set
        containing the cities in the subtour.

        For a concrete example, see TravelingSalesmanInstance.
        """
        return []

    def build_lazy_constraint(self, model, violation):
        """
        Returns a Pyomo constraint which fixes a given violation.

        This method is typically called immediately after find_violations. The violation object
        provided to this method is exactly the same object returned earlier by find_violations.
        After some training, LearningSolver may decide to proactively build some lazy constraints
        at the beginning of the optimization process, before a solution is even available. In this
        case, build_lazy_constraint will be called without a corresponding call to
        find_violations.

        The implementation should not directly add the constraint to the model. The constraint
        will be added by LearningSolver after the method returns.

        For a concrete example, see TravelingSalesmanInstance.
        """
        pass
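For context, a minimal sketch of a concrete subclass (hypothetical; compare the full KnapsackInstance in problems/knapsack.py further below):

import numpy as np
import pyomo.environ as pe

class TinyKnapsackInstance(Instance):
    def __init__(self, weights, prices, capacity):
        self.weights, self.prices, self.capacity = weights, prices, capacity

    def to_model(self):
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.OBJ = pe.Objective(expr=sum(model.x[i] * self.prices[i] for i in items),
                                 sense=pe.maximize)
        model.eq_capacity = pe.Constraint(
            expr=sum(model.x[i] * self.weights[i] for i in items) <= self.capacity)
        return model

    def get_instance_features(self):
        # Fixed-length array: same length for every instance of the problem.
        return np.array([self.capacity, np.average(self.weights)])

    def get_variable_features(self, var, index):
        # Fixed-length array per variable within the same category.
        return np.array([self.weights[index], self.prices[index]])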
@@ -1,3 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
@@ -1,253 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import miplearn
from miplearn import Instance
import numpy as np
import pyomo.environ as pe
from scipy.stats import uniform, randint, bernoulli
from scipy.stats.distributions import rv_frozen


class ChallengeA:
    """
    - 250 variables, 10 constraints, fixed weights
    - w ~ U(0, 1000), jitter ~ U(0.95, 1.05)
    - K = 500, u ~ U(0., 1.)
    - alpha = 0.25
    """
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50):

        np.random.seed(seed)
        self.gen = MultiKnapsackGenerator(n=randint(low=250, high=251),
                                          m=randint(low=10, high=11),
                                          w=uniform(loc=0.0, scale=1000.0),
                                          K=uniform(loc=500.0, scale=0.0),
                                          u=uniform(loc=0.0, scale=1.0),
                                          alpha=uniform(loc=0.25, scale=0.0),
                                          fix_w=True,
                                          w_jitter=uniform(loc=0.95, scale=0.1),
                                          )
        np.random.seed(seed + 1)
        self.training_instances = self.gen.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.gen.generate(n_test_instances)


class MultiKnapsackInstance(Instance):
    """Representation of the Multidimensional 0-1 Knapsack Problem.

    Given a set of n items and m knapsacks, the problem is to find a subset of items S maximizing
    sum(prices[i] for i in S). If selected, each item i occupies weights[i,j] units of space in
    each knapsack j. Furthermore, each knapsack j has limited storage space, given by capacities[j].

    This implementation assigns a different category to each decision variable, and therefore
    trains one ML model per variable. It is only suitable when training and test instances have
    the same size and items don't shuffle around.
    """

    def __init__(self,
                 prices,
                 capacities,
                 weights):
        assert isinstance(prices, np.ndarray)
        assert isinstance(capacities, np.ndarray)
        assert isinstance(weights, np.ndarray)
        assert len(weights.shape) == 2
        self.m, self.n = weights.shape
        assert prices.shape == (self.n,)
        assert capacities.shape == (self.m,)
        self.prices = prices
        self.capacities = capacities
        self.weights = weights

    def to_model(self):
        model = pe.ConcreteModel()
        model.x = pe.Var(range(self.n), domain=pe.Binary)
        model.OBJ = pe.Objective(rule=lambda model: sum(model.x[j] * self.prices[j]
                                                        for j in range(self.n)),
                                 sense=pe.maximize)
        model.eq_capacity = pe.ConstraintList()
        for i in range(self.m):
            model.eq_capacity.add(sum(model.x[j] * self.weights[i, j]
                                      for j in range(self.n)) <= self.capacities[i])

        return model

    def get_instance_features(self):
        return np.hstack([
            np.mean(self.prices),
            self.capacities,
        ])

    def get_variable_features(self, var, index):
        return np.hstack([
            self.prices[index],
            self.weights[:, index],
        ])

#    def get_variable_category(self, var, index):
#        return index


class MultiKnapsackGenerator:
    def __init__(self,
                 n=randint(low=100, high=101),
                 m=randint(low=30, high=31),
                 w=randint(low=0, high=1000),
                 K=randint(low=500, high=501),
                 u=uniform(loc=0.0, scale=1.0),
                 alpha=uniform(loc=0.25, scale=0.0),
                 fix_w=False,
                 w_jitter=uniform(loc=1.0, scale=0.0),
                 round=True,
                 ):
        """Initialize the problem generator.

        Instances have a random number of items (or variables) and a random number of knapsacks
        (or constraints), as specified by the provided probability distributions `n` and `m`,
        respectively. The weight of each item `i` on knapsack `j` is sampled independently from
        the provided distribution `w`. The capacity of knapsack `j` is set to:

            alpha_j * sum(w[i,j] for i in range(n)),

        where `alpha_j`, the tightness ratio, is sampled from the provided probability
        distribution `alpha`. To make the instances more challenging, the costs of the items
        are linearly correlated to their average weights. More specifically, the price of each
        item `i` is set to:

            sum(w[i,j]/m for j in range(m)) + K * u_i,

        where `K`, the correlation coefficient, and `u_i`, the correlation multiplier, are sampled
        from the provided probability distributions. Note that `K` is only sampled once for the
        entire instance.

        If fix_w=True is provided, then w[i,j] are kept the same in all generated instances. This
        also implies that n and m are kept fixed. Although the prices and capacities are derived
        from w[i,j], as long as u and K are not constants, the generated instances will still not
        be completely identical.

        If a probability distribution w_jitter is provided, then item weights will be set to
        w[i,j] * gamma[i,j], where gamma[i,j] is sampled from w_jitter. When combined with
        fix_w=True, this argument may be used to generate instances where the weight of each item
        is roughly the same, but not exactly identical, across all instances. The prices of the
        items and the capacities of the knapsacks will be calculated as above, but using these
        perturbed weights instead.

        By default, all generated prices, weights and capacities are rounded to the nearest
        integer. If `round=False` is provided, this rounding will be disabled.

        Parameters
        ----------
        n: rv_discrete
            Probability distribution for the number of items (or variables)
        m: rv_discrete
            Probability distribution for the number of knapsacks (or constraints)
        w: rv_continuous
            Probability distribution for the item weights
        K: rv_continuous
            Probability distribution for the profit correlation coefficient
        u: rv_continuous
            Probability distribution for the profit multiplier
        alpha: rv_continuous
            Probability distribution for the tightness ratio
        fix_w: boolean
            If true, weights are kept the same (minus the noise from w_jitter) in all instances
        w_jitter: rv_continuous
            Probability distribution for random noise added to the weights
        round: boolean
            If true, all prices, weights and capacities are rounded to the nearest integer
        """
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(m, rv_frozen), "m should be a SciPy probability distribution"
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(K, rv_frozen), "K should be a SciPy probability distribution"
        assert isinstance(u, rv_frozen), "u should be a SciPy probability distribution"
        assert isinstance(alpha, rv_frozen), "alpha should be a SciPy probability distribution"
        assert isinstance(fix_w, bool), "fix_w should be boolean"
        assert isinstance(w_jitter, rv_frozen), \
            "w_jitter should be a SciPy probability distribution"

        self.n = n
        self.m = m
        self.w = w
        self.K = K
        self.u = u
        self.alpha = alpha
        self.w_jitter = w_jitter
        self.round = round

        if fix_w:
            self.fix_n = self.n.rvs()
            self.fix_m = self.m.rvs()
            self.fix_w = np.array([self.w.rvs(self.fix_n) for _ in range(self.fix_m)])
            self.fix_u = self.u.rvs(self.fix_n)
            self.fix_K = self.K.rvs()
        else:
            self.fix_n = None
            self.fix_m = None
            self.fix_w = None
            self.fix_u = None
            self.fix_K = None

    def generate(self, n_samples):
        def _sample():
            if self.fix_w is not None:
                n = self.fix_n
                m = self.fix_m
                w = self.fix_w
                u = self.fix_u
                K = self.fix_K
            else:
                n = self.n.rvs()
                m = self.m.rvs()
                w = np.array([self.w.rvs(n) for _ in range(m)])
                u = self.u.rvs(n)
                K = self.K.rvs()
            w = w * np.array([self.w_jitter.rvs(n) for _ in range(m)])
            alpha = self.alpha.rvs(m)
            p = np.array([w[:, j].sum() / m + K * u[j] for j in range(n)])
            b = np.array([w[i, :].sum() * alpha[i] for i in range(m)])
            if self.round:
                p = p.round()
                b = b.round()
                w = w.round()
            return MultiKnapsackInstance(p, b, w)
        return [_sample() for _ in range(n_samples)]


class KnapsackInstance(Instance):
    """
    Simpler (one-dimensional) Knapsack Problem, used for testing.
    """
    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        model = pe.ConcreteModel()
        items = range(len(self.weights))
        model.x = pe.Var(items, domain=pe.Binary)
        model.OBJ = pe.Objective(rule=lambda m: sum(m.x[v] * self.prices[v] for v in items),
                                 sense=pe.maximize)
        model.eq_capacity = pe.Constraint(rule=lambda m: sum(m.x[v] * self.weights[v]
                                                             for v in items) <= self.capacity)
        return model

    def get_instance_features(self):
        return np.array([
            self.capacity,
            np.average(self.weights),
        ])

    def get_variable_features(self, var, index):
        return np.array([
            self.weights[index],
            self.prices[index],
        ])
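For context, a minimal usage sketch of MultiKnapsackGenerator (hypothetical parameters, not part of the commit):

from scipy.stats import randint, uniform

# Ten instances with 50 items and 5 knapsacks each; the remaining
# parameters keep their defaults.
gen = MultiKnapsackGenerator(n=randint(low=50, high=51),
                             m=randint(low=5, high=6),
                             w=uniform(loc=0.0, scale=1000.0))
instances = gen.generate(10)
model = instances[0].to_model()  # concrete Pyomo model, ready to solve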
@@ -1,122 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

import numpy as np
import pyomo.environ as pe
import networkx as nx
from miplearn import Instance
import random
from scipy.stats import uniform, randint, bernoulli
from scipy.stats.distributions import rv_frozen


class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):

        np.random.seed(seed)
        self.generator = MaxWeightStableSetGenerator(w=uniform(loc=100., scale=50.),
                                                     n=randint(low=200, high=201),
                                                     p=uniform(loc=0.05, scale=0.0),
                                                     fix_graph=True)

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class MaxWeightStableSetGenerator:
    """Random instance generator for the Maximum-Weight Stable Set Problem.

    The generator has two modes of operation. When `fix_graph=True` is provided, one random
    Erdős-Rényi graph $G_{n,p}$ is generated in the constructor, where $n$ and $p$ are sampled
    from user-provided probability distributions `n` and `p`. To generate each instance, the
    generator independently samples each $w_v$ from the user-provided probability distribution `w`.

    When `fix_graph=False`, a new random graph is generated for each instance; the remaining
    parameters are sampled in the same way.
    """

    def __init__(self,
                 w=uniform(loc=10.0, scale=1.0),
                 n=randint(low=250, high=251),
                 p=uniform(loc=0.05, scale=0.0),
                 fix_graph=True):
        """Initialize the problem generator.

        Parameters
        ----------
        w: rv_continuous
            Probability distribution for vertex weights.
        n: rv_discrete
            Probability distribution for parameter $n$ in Erdős-Rényi model.
        p: rv_continuous
            Probability distribution for parameter $p$ in Erdős-Rényi model.
        """
        assert isinstance(w, rv_frozen), "w should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(p, rv_frozen), "p should be a SciPy probability distribution"
        self.w = w
        self.n = n
        self.p = p
        self.fix_graph = fix_graph
        self.graph = None
        if fix_graph:
            self.graph = self._generate_graph()

    def generate(self, n_samples):
        def _sample():
            if self.graph is not None:
                graph = self.graph
            else:
                graph = self._generate_graph()
            weights = self.w.rvs(graph.number_of_nodes())
            return MaxWeightStableSetInstance(graph, weights)
        return [_sample() for _ in range(n_samples)]

    def _generate_graph(self):
        return nx.generators.random_graphs.binomial_graph(self.n.rvs(), self.p.rvs())


class MaxWeightStableSetInstance(Instance):
    """An instance of the Maximum-Weight Stable Set Problem.

    Given a graph G=(V,E) and a weight w_v for each vertex v, the problem asks for a stable
    set S of G maximizing sum(w_v for v in S). A stable set (also called independent set) is
    a subset of vertices, no two of which are adjacent.

    This is one of Karp's 21 NP-complete problems.
    """

    def __init__(self, graph, weights):
        self.graph = graph
        self.weights = weights
        self.model = None

    def to_model(self):
        nodes = list(self.graph.nodes)
        edges = list(self.graph.edges)
        self.model = model = pe.ConcreteModel()
        model.x = pe.Var(nodes, domain=pe.Binary)
        model.OBJ = pe.Objective(rule=lambda m: sum(m.x[v] * self.weights[v] for v in nodes),
                                 sense=pe.maximize)
        model.edge_eqs = pe.ConstraintList()
        for edge in edges:
            model.edge_eqs.add(model.x[edge[0]] + model.x[edge[1]] <= 1)

        return model

    def get_instance_features(self):
        return np.array(self.weights)

    def get_variable_features(self, var, index):
        return np.ones(0)

    def get_variable_category(self, var, index):
        return index
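For context, a minimal usage sketch of MaxWeightStableSetGenerator (hypothetical parameters, not part of the commit):

from scipy.stats import randint, uniform

# Five instances sharing one fixed 30-node Erdős-Rényi graph; only the
# vertex weights vary between instances.
gen = MaxWeightStableSetGenerator(w=uniform(loc=10., scale=1.),
                                  n=randint(low=30, high=31),
                                  p=uniform(loc=0.1, scale=0.0),
                                  fix_graph=True)
instances = gen.generate(5)
model = instances[0].to_model()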
@@ -1,4 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

@@ -1,25 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver
from miplearn.problems.knapsack import MultiKnapsackGenerator, MultiKnapsackInstance
from scipy.stats import uniform, randint
import numpy as np


def test_knapsack_generator():
    gen = MultiKnapsackGenerator(n=randint(low=100, high=101),
                                 m=randint(low=30, high=31),
                                 w=randint(low=0, high=1000),
                                 K=randint(low=500, high=501),
                                 u=uniform(loc=1.0, scale=1.0),
                                 alpha=uniform(loc=0.50, scale=0.0),
                                 )
    instances = gen.generate(100)
    w_sum = sum(instance.weights for instance in instances) / len(instances)
    p_sum = sum(instance.prices for instance in instances) / len(instances)
    b_sum = sum(instance.capacities for instance in instances) / len(instances)
    assert round(np.mean(w_sum), -1) == 500.
    assert round(np.mean(p_sum), -1) == 1250.
    assert round(np.mean(b_sum), -3) == 25000.
@@ -1,45 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver
from miplearn.problems.stab import MaxWeightStableSetInstance
from miplearn.problems.stab import MaxWeightStableSetGenerator
import networkx as nx
import numpy as np
from scipy.stats import uniform, randint


def test_stab():
    graph = nx.cycle_graph(5)
    weights = [1., 1., 1., 1., 1.]
    instance = MaxWeightStableSetInstance(graph, weights)
    solver = LearningSolver()
    solver.solve(instance)
    assert instance.model.OBJ() == 2.


def test_stab_generator_fixed_graph():
    from miplearn.problems.stab import MaxWeightStableSetGenerator
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=10, high=11),
                                      p=uniform(loc=0.05, scale=0.),
                                      fix_graph=True)
    instances = gen.generate(1_000)
    weights = np.array([instance.weights for instance in instances])
    weights_avg_actual = np.round(np.average(weights, axis=0))
    weights_avg_expected = [55.0] * 10
    assert list(weights_avg_actual) == weights_avg_expected


def test_stab_generator_random_graph():
    from miplearn.problems.stab import MaxWeightStableSetGenerator
    gen = MaxWeightStableSetGenerator(w=uniform(loc=50., scale=10.),
                                      n=randint(low=30, high=41),
                                      p=uniform(loc=0.5, scale=0.),
                                      fix_graph=False)
    instances = gen.generate(1_000)
    n_nodes = [instance.graph.number_of_nodes() for instance in instances]
    n_edges = [instance.graph.number_of_edges() for instance in instances]
    assert np.round(np.mean(n_nodes)) == 35.
    assert np.round(np.mean(n_edges), -1) == 300.
@@ -1,70 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver
from miplearn.problems.tsp import TravelingSalesmanGenerator, TravelingSalesmanInstance
import numpy as np
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.stats import uniform, randint


def test_generator():
    instances = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                           y=uniform(loc=0.0, scale=1000.0),
                                           n=randint(low=100, high=101),
                                           gamma=uniform(loc=0.95, scale=0.1),
                                           fix_cities=True).generate(100)
    assert len(instances) == 100
    assert instances[0].n_cities == 100
    assert norm(instances[0].distances - instances[0].distances.T) < 1e-6
    d = [instance.distances[0,1] for instance in instances]
    assert np.std(d) > 0


def test_instance():
    n_cities = 4
    distances = np.array([
        [0., 1., 2., 1.],
        [1., 0., 1., 2.],
        [2., 1., 0., 1.],
        [1., 2., 1., 0.],
    ])
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi', 'cplex']:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
        assert x[0,1] == 1.0
        assert x[0,2] == 0.0
        assert x[0,3] == 1.0
        assert x[1,2] == 1.0
        assert x[1,3] == 0.0
        assert x[2,3] == 1.0
        assert instance.lower_bound == 4.0
        assert instance.upper_bound == 4.0


def test_subtour():
    n_cities = 6
    cities = np.array([
        [0., 0.],
        [1., 0.],
        [2., 0.],
        [3., 0.],
        [0., 1.],
        [3., 1.],
    ])
    distances = squareform(pdist(cities))
    instance = TravelingSalesmanInstance(n_cities, distances)
    for solver_name in ['gurobi', 'cplex']:
        solver = LearningSolver(solver=solver_name)
        solver.solve(instance)
        x = instance.solution["x"]
        assert x[0,1] == 1.0
        assert x[0,4] == 1.0
        assert x[1,2] == 1.0
        assert x[2,3] == 1.0
        assert x[3,5] == 1.0
        assert x[4,5] == 1.0
@@ -1,169 +0,0 @@
# MIPLearn, an extensible framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
# Written by Alinson S. Xavier <axavier@anl.gov>

import numpy as np
import pyomo.environ as pe
from miplearn import Instance
from scipy.stats import uniform, randint
from scipy.spatial.distance import pdist, squareform
from scipy.stats.distributions import rv_frozen
import networkx as nx
import random


class ChallengeA:
    def __init__(self,
                 seed=42,
                 n_training_instances=500,
                 n_test_instances=50,
                 ):

        np.random.seed(seed)
        self.generator = TravelingSalesmanGenerator(x=uniform(loc=0.0, scale=1000.0),
                                                    y=uniform(loc=0.0, scale=1000.0),
                                                    n=randint(low=350, high=351),
                                                    gamma=uniform(loc=0.95, scale=0.1),
                                                    fix_cities=True,
                                                    round=True,
                                                    )

        np.random.seed(seed + 1)
        self.training_instances = self.generator.generate(n_training_instances)

        np.random.seed(seed + 2)
        self.test_instances = self.generator.generate(n_test_instances)


class TravelingSalesmanGenerator:
    """Random generator for the Traveling Salesman Problem."""

    def __init__(self,
                 x=uniform(loc=0.0, scale=1000.0),
                 y=uniform(loc=0.0, scale=1000.0),
                 n=randint(low=100, high=101),
                 gamma=uniform(loc=1.0, scale=0.0),
                 fix_cities=True,
                 round=True,
                 ):
"""Initializes the problem generator.
|
||||
|
||||
Initially, the generator creates n cities (x_1,y_1),...,(x_n,y_n) where n, x_i and y_i are
|
||||
sampled independently from the provided probability distributions `n`, `x` and `y`. For each
|
||||
(unordered) pair of cities (i,j), the distance d[i,j] between them is set to:
|
||||
|
||||
d[i,j] = gamma[i,j] \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
|
||||
|
||||
where gamma is sampled from the provided probability distribution `gamma`.
|
||||
|
||||
If fix_cities=True, the list of cities is kept the same for all generated instances. The
|
||||
gamma values, and therefore also the distances, are still different.
|
||||
|
||||
By default, all distances d[i,j] are rounded to the nearest integer. If `round=False`
|
||||
is provided, this rounding will be disabled.
|
||||
|
||||
Arguments
|
||||
---------
|
||||
x: rv_continuous
|
||||
Probability distribution for the x-coordinate of each city.
|
||||
y: rv_continuous
|
||||
Probability distribution for the y-coordinate of each city.
|
||||
n: rv_discrete
|
||||
Probability distribution for the number of cities.
|
||||
fix_cities: bool
|
||||
If False, cities will be resampled for every generated instance. Otherwise, list of
|
||||
cities will be computed once, during the constructor.
|
||||
round: bool
|
||||
If True, distances are rounded to the nearest integer.
|
||||
"""
|
||||
        assert isinstance(x, rv_frozen), "x should be a SciPy probability distribution"
        assert isinstance(y, rv_frozen), "y should be a SciPy probability distribution"
        assert isinstance(n, rv_frozen), "n should be a SciPy probability distribution"
        assert isinstance(gamma, rv_frozen), "gamma should be a SciPy probability distribution"
        self.x = x
        self.y = y
        self.n = n
        self.gamma = gamma
        self.round = round

        if fix_cities:
            self.fixed_n, self.fixed_cities = self._generate_cities()
        else:
            self.fixed_n = None
            self.fixed_cities = None

    def generate(self, n_samples):
        def _sample():
            if self.fixed_cities is not None:
                n, cities = self.fixed_n, self.fixed_cities
            else:
                n, cities = self._generate_cities()
            distances = squareform(pdist(cities)) * self.gamma.rvs(size=(n, n))
            distances = np.tril(distances) + np.triu(distances.T, 1)
            if self.round:
                distances = distances.round()
            return TravelingSalesmanInstance(n, distances)
        return [_sample() for _ in range(n_samples)]

    def _generate_cities(self):
        n = self.n.rvs()
        cities = np.array([(self.x.rvs(), self.y.rvs()) for _ in range(n)])
        return n, cities

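# Illustrative only (not part of the original file): a tiny numeric sketch of the
# distance construction performed in generate() above, with assumed coordinates:
#
#     cities = np.array([[0., 0.], [3., 4.]])   # two cities, Euclidean distance 5
#     base = squareform(pdist(cities))          # [[0., 5.], [5., 0.]]
#     g = uniform(loc=0.95, scale=0.1).rvs(size=(2, 2))
#     d = base * g                              # perturbed, generally asymmetric
#     d = np.tril(d) + np.triu(d.T, 1)          # copy the lower triangle up: d == d.T
#
# Each off-diagonal d[i,j] is then a sampled gamma multiplier times the Euclidean
# distance between cities i and j, as described in the docstring above.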

class TravelingSalesmanInstance(Instance):
    """An instance of the Traveling Salesman Problem.

    Given a list of cities and the distance between each pair of cities, the problem asks for
    the shortest route starting at the first city, visiting each other city exactly once, then
    returning to the first city. This problem is a generalization of the Hamiltonian cycle
    problem, one of Karp's 21 NP-complete problems.
    """

    def __init__(self, n_cities, distances):
        assert isinstance(distances, np.ndarray)
        assert distances.shape == (n_cities, n_cities)
        self.n_cities = n_cities
        self.distances = distances

    def to_model(self):
        model = pe.ConcreteModel()
        model.edges = edges = [(i,j)
                               for i in range(self.n_cities)
                               for j in range(i+1, self.n_cities)]
        model.x = pe.Var(edges, domain=pe.Binary)
        model.obj = pe.Objective(expr=sum(model.x[i,j] * self.distances[i,j]
                                          for (i,j) in edges),
                                 sense=pe.minimize)
        model.eq_degree = pe.ConstraintList()
        model.eq_subtour = pe.ConstraintList()
        for i in range(self.n_cities):
            model.eq_degree.add(sum(model.x[min(i,j), max(i,j)]
                                    for j in range(self.n_cities) if i != j) == 2)
        return model

    def get_instance_features(self):
        return np.array([1])

    def get_variable_features(self, var_name, index):
        return np.array([1])

    def get_variable_category(self, var_name, index):
        return index

    def find_violations(self, model):
        selected_edges = [e for e in model.edges if model.x[e].value > 0.5]
        graph = nx.Graph()
        graph.add_edges_from(selected_edges)
        components = [frozenset(c) for c in list(nx.connected_components(graph))]
        violations = []
        for c in components:
            if len(c) < self.n_cities:
                violations += [c]
        return violations

    def build_lazy_constraint(self, model, component):
        cut_edges = [e for e in model.edges
                     if (e[0] in component and e[1] not in component) or
                        (e[0] not in component and e[1] in component)]
        return model.eq_subtour.add(sum(model.x[e] for e in cut_edges) >= 2)
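
# Illustrative only (not part of the original file): how the two methods above drive
# subtour elimination, mirroring test_subtour in this commit. If a candidate solution
# splits into two cycles, say {0,1,2} and {3,4,5}, find_violations() returns each
# undersized connected component, and build_lazy_constraint() adds a cut requiring at
# least two selected edges to cross the component boundary:
#
#     for component in instance.find_violations(model):
#         instance.build_lazy_constraint(model, component)   # sum over cut edges >= 2
#
# The solver classes in this commit call these hooks either from a solve/re-solve loop
# or from a Gurobi lazy-constraint callback.
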
@@ -1,361 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from . import ObjectiveValueComponent, PrimalSolutionComponent, LazyConstraintsComponent
import pyomo.environ as pe
from pyomo.core import Var
from copy import deepcopy
import pickle
from scipy.stats import randint
from p_tqdm import p_map
import numpy as np
import logging
logger = logging.getLogger(__name__)


# Global memory for multiprocessing
SOLVER = [None]
INSTANCES = [None]


def _parallel_solve(instance_idx):
    solver = deepcopy(SOLVER[0])
    instance = INSTANCES[0][instance_idx]
    results = solver.solve(instance)
    return {
        "Results": results,
        "Solution": instance.solution,
        "LP solution": instance.lp_solution,
        "LP value": instance.lp_value,
        "Upper bound": instance.upper_bound,
        "Lower bound": instance.lower_bound,
        "Violations": instance.found_violations,
    }
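
# Added commentary (not in the original file): SOLVER and INSTANCES act as module-level
# mailboxes for _parallel_solve, which p_map pickles and executes in worker processes.
# parallel_solve() stores the solver and the instance list here once, each task passes
# only an integer index, and each worker deep-copies the solver so runs stay independent.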


class InternalSolver:
    def __init__(self):
        self.is_warm_start_available = False
        self.model = None
        self.var_name_to_var = {}

    def solve_lp(self, tee=False):
        self.solver.set_instance(self.model)

        # Relax domain
        from pyomo.core.base.set_types import Reals, Binary
        original_domains = []
        for (idx, var) in enumerate(self.model.component_data_objects(Var)):
            original_domains += [var.domain]
            lb, ub = var.bounds
            if var.domain == Binary:
                var.domain = Reals
                var.setlb(lb)
                var.setub(ub)
            self.solver.update_var(var)

        # Solve LP relaxation
        results = self.solver.solve(tee=tee)

        # Restore domains
        for (idx, var) in enumerate(self.model.component_data_objects(Var)):
            if original_domains[idx] == Binary:
                var.domain = original_domains[idx]
            self.solver.update_var(var)

        return {
            "Optimal value": results["Problem"][0]["Lower bound"],
        }
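
    # Added commentary (not in the original file): solve_lp() above computes the LP
    # relaxation by temporarily switching each Binary variable to Reals while keeping
    # its bounds, solving, and then restoring the original domains, so the same
    # persistent solver model can be reused for the subsequent MIP solve.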

    def clear_values(self):
        for var in self.model.component_objects(Var):
            for index in var:
                if var[index].fixed:
                    continue
                var[index].value = None

    def get_solution(self):
        solution = {}
        for var in self.model.component_objects(Var):
            solution[str(var)] = {}
            for index in var:
                solution[str(var)][index] = var[index].value
        return solution

    def set_warm_start(self, solution):
        self.is_warm_start_available = True
        self.clear_values()
        count_total, count_fixed = 0, 0
        for var_name in solution:
            var = self.var_name_to_var[var_name]
            for index in solution[var_name]:
                count_total += 1
                var[index].value = solution[var_name][index]
                if solution[var_name][index] is not None:
                    count_fixed += 1
        logger.info("Setting start values for %d variables (out of %d)" %
                    (count_fixed, count_total))

    def set_model(self, model):
        from pyomo.core.kernel.objective import minimize, maximize
        self.model = model
        self.solver.set_instance(model)
        if self.solver._objective.sense == minimize:
            self.sense = "min"
        else:
            self.sense = "max"
        self.var_name_to_var = {}
        self.all_vars = []
        for var in model.component_objects(Var):
            self.var_name_to_var[var.name] = var
            self.all_vars += [var[idx] for idx in var]

    def set_instance(self, instance):
        self.instance = instance

    def fix(self, solution):
        count_total, count_fixed = 0, 0
        for var_name in solution:
            for index in solution[var_name]:
                var = self.var_name_to_var[var_name]
                count_total += 1
                if solution[var_name][index] is None:
                    continue
                count_fixed += 1
                var[index].fix(solution[var_name][index])
                self.solver.update_var(var[index])
        logger.info("Fixing values for %d variables (out of %d)" %
                    (count_fixed, count_total))

    def add_constraint(self, cut):
        self.solver.add_constraint(cut)

    def solve(self, tee=False):
        total_wallclock_time = 0
        self.instance.found_violations = []
        while True:
            logger.debug("Solving MIP...")
            results = self.solver.solve(tee=tee)
            total_wallclock_time += results["Solver"][0]["Wallclock time"]
            if not hasattr(self.instance, "find_violations"):
                break
            logger.debug("Finding violated constraints...")
            violations = self.instance.find_violations(self.model)
            if len(violations) == 0:
                break
            self.instance.found_violations += violations
            logger.debug("    %d violations found" % len(violations))
            for v in violations:
                cut = self.instance.build_lazy_constraint(self.model, v)
                self.add_constraint(cut)

        return {
            "Lower bound": results["Problem"][0]["Lower bound"],
            "Upper bound": results["Problem"][0]["Upper bound"],
            "Wallclock time": total_wallclock_time,
            "Nodes": 1,
            "Sense": self.sense,
        }


class GurobiSolver(InternalSolver):
    def __init__(self):
        super().__init__()
        self.solver = pe.SolverFactory('gurobi_persistent')
        self.solver.options["Seed"] = randint(low=0, high=1000).rvs()

    def set_threads(self, threads):
        self.solver.options["Threads"] = threads

    def set_time_limit(self, time_limit):
        self.solver.options["TimeLimit"] = time_limit

    def set_gap_tolerance(self, gap_tolerance):
        self.solver.options["MIPGap"] = gap_tolerance

    def solve(self, tee=False):
        from gurobipy import GRB
        def cb(cb_model, cb_opt, cb_where):
            if cb_where == GRB.Callback.MIPSOL:
                cb_opt.cbGetSolution(self.all_vars)
                logger.debug("Finding violated constraints...")
                violations = self.instance.find_violations(cb_model)
                self.instance.found_violations += violations
                logger.debug("    %d violations found" % len(violations))
                for v in violations:
                    cut = self.instance.build_lazy_constraint(cb_model, v)
                    cb_opt.cbLazy(cut)
        if hasattr(self.instance, "find_violations"):
            self.solver.options["LazyConstraints"] = 1
            self.solver.set_callback(cb)
            self.instance.found_violations = []
        results = self.solver.solve(tee=tee, warmstart=self.is_warm_start_available)
        self.solver.set_callback(None)
        return {
            "Lower bound": results["Problem"][0]["Lower bound"],
            "Upper bound": results["Problem"][0]["Upper bound"],
            "Wallclock time": results["Solver"][0]["Wallclock time"],
            "Nodes": self.solver._solver_model.getAttr("NodeCount"),
            "Sense": self.sense,
        }


class CPLEXSolver(InternalSolver):
    def __init__(self):
        super().__init__()
        import cplex
        self.solver = pe.SolverFactory('cplex_persistent')
        self.solver.options["randomseed"] = randint(low=0, high=1000).rvs()

    def set_threads(self, threads):
        self.solver.options["threads"] = threads

    def set_time_limit(self, time_limit):
        self.solver.options["timelimit"] = time_limit

    def set_gap_tolerance(self, gap_tolerance):
        self.solver.options["mip_tolerances_mipgap"] = gap_tolerance

    def solve_lp(self, tee=False):
        import cplex
        lp = self.solver._solver_model
        var_types = lp.variables.get_types()
        n_vars = len(var_types)
        lp.set_problem_type(cplex.Cplex.problem_type.LP)
        results = self.solver.solve(tee=tee)
        lp.variables.set_types(zip(range(n_vars), var_types))
        return {
            "Optimal value": results["Problem"][0]["Lower bound"],
        }


class LearningSolver:
    """
    Mixed-Integer Linear Programming (MIP) solver that extracts information from previous runs,
    using Machine Learning methods, to accelerate the solution of new (yet unseen) instances.
    """

    def __init__(self,
                 components=None,
                 gap_tolerance=None,
                 mode="exact",
                 solver="gurobi",
                 threads=4,
                 time_limit=None,
                 ):

        self.is_persistent = None
        self.components = components
        self.mode = mode
        self.internal_solver = None
        self.internal_solver_factory = solver
        self.threads = threads
        self.time_limit = time_limit
        self.gap_tolerance = gap_tolerance
        self.tee = False
        self.training_instances = []

        if self.components is not None:
            assert isinstance(self.components, dict)
        else:
            self.components = {
                "ObjectiveValue": ObjectiveValueComponent(),
                "PrimalSolution": PrimalSolutionComponent(),
                "LazyConstraints": LazyConstraintsComponent(),
            }

        assert self.mode in ["exact", "heuristic"]
        for component in self.components.values():
            component.mode = self.mode

    def _create_internal_solver(self):
        logger.debug("Initializing %s" % self.internal_solver_factory)
        if self.internal_solver_factory == "cplex":
            solver = CPLEXSolver()
        elif self.internal_solver_factory == "gurobi":
            solver = GurobiSolver()
        else:
            raise Exception("solver %s not supported" % self.internal_solver_factory)
        solver.set_threads(self.threads)
        if self.time_limit is not None:
            solver.set_time_limit(self.time_limit)
        if self.gap_tolerance is not None:
            solver.set_gap_tolerance(self.gap_tolerance)
        return solver

    def solve(self,
              instance,
              model=None,
              tee=False,
              relaxation_only=False,
              ):
        if model is None:
            model = instance.to_model()

        self.tee = tee
        self.internal_solver = self._create_internal_solver()
        self.internal_solver.set_model(model)
        self.internal_solver.set_instance(instance)

        logger.debug("Solving LP relaxation...")
        results = self.internal_solver.solve_lp(tee=tee)
        instance.lp_solution = self.internal_solver.get_solution()
        instance.lp_value = results["Optimal value"]

        logger.debug("Running before_solve callbacks...")
        for component in self.components.values():
            component.before_solve(self, instance, model)

        if relaxation_only:
            return results

        results = self.internal_solver.solve(tee=tee)

        # Read MIP solution and bounds
        instance.lower_bound = results["Lower bound"]
        instance.upper_bound = results["Upper bound"]
        instance.solution = self.internal_solver.get_solution()

        logger.debug("Calling after_solve callbacks...")
        for component in self.components.values():
            component.after_solve(self, instance, model, results)

        # Store instance for future training
        self.training_instances += [instance]

        return results

    def parallel_solve(self,
                       instances,
                       n_jobs=4,
                       label="Solve",
                       collect_training_data=True,
                       ):

        self.internal_solver = None
        SOLVER[0] = self
        INSTANCES[0] = instances
        p_map_results = p_map(_parallel_solve,
                              list(range(len(instances))),
                              num_cpus=n_jobs,
                              desc=label)

        results = [p["Results"] for p in p_map_results]
        for (idx, r) in enumerate(p_map_results):
            instances[idx].solution = r["Solution"]
            instances[idx].lp_solution = r["LP solution"]
            instances[idx].lp_value = r["LP value"]
            instances[idx].lower_bound = r["Lower bound"]
            instances[idx].upper_bound = r["Upper bound"]
            instances[idx].found_violations = r["Violations"]

        return results

    def fit(self, training_instances=None):
        if training_instances is None:
            training_instances = self.training_instances
        if len(training_instances) == 0:
            return
        for component in self.components.values():
            component.fit(training_instances)
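
# --- Illustrative only (not part of the original file): a minimal end-to-end sketch of
# --- the LearningSolver workflow defined above, mirroring the tests in this commit.
# --- Assumes a supported MIP solver (Gurobi or CPLEX) is installed.
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance

instance = KnapsackInstance(weights=[23., 26., 20., 18.],
                            prices=[505., 352., 458., 220.],
                            capacity=67.)
solver = LearningSolver(solver="gurobi", threads=1)
solver.solve(instance)   # solves the LP relaxation, runs components, then the MIP
solver.fit()             # trains the ML components on the instances solved so far
solver.solve(instance)   # later solves can reuse the learned predictions
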
@@ -1,4 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

@@ -1,37 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver, BenchmarkRunner
from miplearn.problems.stab import MaxWeightStableSetGenerator
from scipy.stats import randint
import numpy as np
import pyomo.environ as pe
import os.path


def test_benchmark():
    # Generate training and test instances
    train_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(5)
    test_instances = MaxWeightStableSetGenerator(n=randint(low=25, high=26)).generate(3)

    # Training phase...
    training_solver = LearningSolver()
    training_solver.parallel_solve(train_instances, n_jobs=10)

    # Test phase...
    test_solvers = {
        "Strategy A": LearningSolver(),
        "Strategy B": LearningSolver(),
    }
    benchmark = BenchmarkRunner(test_solvers)
    benchmark.fit(train_instances)
    benchmark.parallel_solve(test_instances, n_jobs=2, n_trials=2)
    assert benchmark.raw_results().values.shape == (12,16)

    benchmark.save_results("/tmp/benchmark.csv")
    assert os.path.isfile("/tmp/benchmark.csv")

    benchmark = BenchmarkRunner(test_solvers)
    benchmark.load_results("/tmp/benchmark.csv")
    assert benchmark.raw_results().values.shape == (12,16)
@@ -1,62 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn.problems.knapsack import KnapsackInstance
from miplearn import (LearningSolver,
                      SolutionExtractor,
                      InstanceFeaturesExtractor,
                      VariableFeaturesExtractor,
                      )
import numpy as np
import pyomo.environ as pe


def _get_instances():
    instances = [
        KnapsackInstance(weights=[1., 2., 3.],
                         prices=[10., 20., 30.],
                         capacity=2.5,
                         ),
        KnapsackInstance(weights=[3., 4., 5.],
                         prices=[20., 30., 40.],
                         capacity=4.5,
                         ),
    ]
    models = [instance.to_model() for instance in instances]
    solver = LearningSolver()
    for (i, instance) in enumerate(instances):
        solver.solve(instances[i], models[i])
    return instances, models


def test_solution_extractor():
    instances, models = _get_instances()
    features = SolutionExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features.keys()
    assert isinstance(features["default"], np.ndarray)
    assert features["default"].shape == (6, 2)
    assert features["default"].ravel().tolist() == [
        1., 0.,
        0., 1.,
        1., 0.,
        1., 0.,
        0., 1.,
        1., 0.,
    ]


def test_instance_features_extractor():
    instances, models = _get_instances()
    features = InstanceFeaturesExtractor().extract(instances)
    assert features.shape == (2,3)


def test_variable_features_extractor():
    instances, models = _get_instances()
    features = VariableFeaturesExtractor().extract(instances)
    assert isinstance(features, dict)
    assert "default" in features
    assert features["default"].shape == (6,5)

@@ -1,51 +0,0 @@
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.

from miplearn import LearningSolver, BranchPriorityComponent
from miplearn.problems.knapsack import KnapsackInstance


def _get_instance():
    return KnapsackInstance(
        weights=[23., 26., 20., 18.],
        prices=[505., 352., 458., 220.],
        capacity=67.,
    )


def test_solver():
    instance = _get_instance()
    for mode in ["exact", "heuristic"]:
        for internal_solver in ["cplex", "gurobi"]:
            solver = LearningSolver(time_limit=300,
                                    gap_tolerance=1e-3,
                                    threads=1,
                                    solver=internal_solver,
                                    mode=mode,
                                    )
            results = solver.solve(instance)
            assert instance.solution["x"][0] == 1.0
            assert instance.solution["x"][1] == 0.0
            assert instance.solution["x"][2] == 1.0
            assert instance.solution["x"][3] == 1.0
            assert instance.lower_bound == 1183.0
            assert instance.upper_bound == 1183.0

            assert round(instance.lp_solution["x"][0], 3) == 1.000
            assert round(instance.lp_solution["x"][1], 3) == 0.923
            assert round(instance.lp_solution["x"][2], 3) == 1.000
            assert round(instance.lp_solution["x"][3], 3) == 0.000
            assert round(instance.lp_value, 3) == 1287.923

            solver.fit()
            solver.solve(instance)

def test_parallel_solve():
    instances = [_get_instance() for _ in range(10)]
    solver = LearningSolver()
    results = solver.parallel_solve(instances, n_jobs=3)
    assert len(results) == 10
    for instance in instances:
        assert len(instance.solution["x"].keys()) == 4
