Temporarily remove unused files; make package work with Cbc

pull/1/head
Alinson S. Xavier 6 years ago
parent ef14f42d01
commit f538356bf6

.gitignore

@ -1,3 +1,5 @@
+TODO.md
+*.bin
*$py.class
*.cover
*.egg

@ -3,4 +3,4 @@
# Written by Alinson S. Xavier <axavier@anl.gov>
from .instance import Instance
from .solvers import LearningSolver

@ -4,23 +4,24 @@
from abc import ABC, abstractmethod


class Instance(ABC):
    """
    Abstract class holding all the data necessary to generate a concrete model of the problem.

    In the knapsack problem, for example, this class could hold the number of items, their weights
    and costs, as well as the size of the knapsack. Objects implementing this class are able to
-    convert themselves into a concrete optimization model, which can be optimized by solver, or
+    convert themselves into a concrete optimization model, which can be optimized by a solver, or
    into arrays of features, which can be provided as inputs to machine learning models.
    """

    @abstractmethod
    def to_model(self):
        """
        Returns a concrete Pyomo model corresponding to this instance.
        """
        pass

    @abstractmethod
    def get_instance_features(self):
        """

@ -65,4 +66,4 @@ class Instance(ABC):
        pass

    def get_variable_category(self, var, index):
        return "default"

@ -6,46 +6,48 @@ import miplearn
import numpy as np
import pyomo.environ as pe


class KnapsackInstance(miplearn.Instance):
    def __init__(self, weights, prices, capacity):
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
-        model = m = pe.ConcreteModel()
+        model = pe.ConcreteModel()
        items = range(len(self.weights))
-        m.x = pe.Var(items, domain=pe.Binary)
-        m.OBJ = pe.Objective(rule=lambda m : sum(m.x[v] * self.prices[v] for v in items),
-                             sense=pe.maximize)
-        m.eq_capacity = pe.Constraint(rule = lambda m :
-            sum(m.x[v] * self.weights[v]
-                for v in items) <= self.capacity)
-        return m
+        model.x = pe.Var(items, domain=pe.Binary)
+        model.OBJ = pe.Objective(rule=lambda m: sum(m.x[v] * self.prices[v] for v in items),
+                                 sense=pe.maximize)
+        model.eq_capacity = pe.Constraint(rule=lambda m: sum(m.x[v] * self.weights[v]
+                                                             for v in items) <= self.capacity)
+        return model

    def get_instance_features(self):
        return np.array([
            self.capacity,
            np.average(self.weights),
        ])

    def get_variable_features(self, var, index):
        return np.array([
            self.weights[index],
            self.prices[index],
        ])


class KnapsackInstance2(KnapsackInstance):
    """
    Alternative implementation of the Knapsack Problem, which assigns a different category for each
    decision variable, and therefore trains one machine learning model per variable.
    """

    def get_instance_features(self):
        return np.hstack([self.weights, self.prices])

    def get_variable_features(self, var, index):
        return np.array([
        ])

    def get_variable_category(self, var, index):
        return index
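
For context (not part of this commit), a minimal usage sketch of the reworked KnapsackInstance, assuming the Cbc executable is available to Pyomo; the data mirrors the updated tests:

import pyomo.environ as pe
from miplearn.problems.knapsack import KnapsackInstance

instance = KnapsackInstance(weights=[23., 26., 20., 18.],
                            prices=[505., 352., 458., 220.],
                            capacity=67.)
model = instance.to_model()            # concrete Pyomo model with binary x variables
pe.SolverFactory('cbc').solve(model)
print([model.x[i].value for i in range(4)])   # optimal selection, e.g. [1.0, 0.0, 1.0, 1.0]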

@ -1,60 +0,0 @@
# MIPLearn: A Machine-Learning Framework for Mixed-Integer Optimization
# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
# Written by Alinson S. Xavier <axavier@anl.gov>
import numpy as np
import pyomo.environ as pe
import networkx as nx
from miplearn import Instance
import random


class MaxStableSetGenerator:
    def __init__(self, sizes=[50], densities=[0.1]):
        self.sizes = sizes
        self.densities = densities

    def generate(self):
        size = random.choice(self.sizes)
        density = random.choice(self.densities)
        self.graph = nx.generators.random_graphs.binomial_graph(size, density)
        weights = np.ones(self.graph.number_of_nodes())
        return MaxStableSetInstance(self.graph, weights)


class MaxStableSetInstance(Instance):
    def __init__(self, graph, weights):
        self.graph = graph
        self.weights = weights

    def to_model(self):
        nodes = list(self.graph.nodes)
        edges = list(self.graph.edges)
        model = m = pe.ConcreteModel()
        m.x = pe.Var(nodes, domain=pe.Binary)
        m.OBJ = pe.Objective(rule=lambda m: sum(m.x[v] * self.weights[v] for v in nodes),
                             sense=pe.maximize)
        m.edge_eqs = pe.ConstraintList()
        for edge in edges:
            m.edge_eqs.add(m.x[edge[0]] + m.x[edge[1]] <= 1)
        return m

    def get_instance_features(self):
        return np.array([
            self.graph.number_of_nodes(),
            self.graph.number_of_edges(),
        ])

    def get_variable_features(self, var, index):
        first_neighbors = list(self.graph.neighbors(index))
        second_neighbors = [list(self.graph.neighbors(u)) for u in first_neighbors]
        degree = len(first_neighbors)
        neighbor_degrees = sorted([len(nn) for nn in second_neighbors])
        neighbor_degrees = neighbor_degrees + [100.] * 10
        return np.array([
            degree,
            neighbor_degrees[0] - degree,
            neighbor_degrees[1] - degree,
            neighbor_degrees[2] - degree,
        ])

@ -2,77 +2,81 @@
# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
# Written by Alinson S. Xavier <axavier@anl.gov>
-from .warmstart import *
+# from .warmstart import WarmStartPredictor
+from .transformers import PerVariableTransformer
+from .warmstart import WarmStartPredictor
import pyomo.environ as pe
import numpy as np
-from math import isfinite


class LearningSolver:
    """
    Mixed-Integer Linear Programming (MIP) solver that extracts information from previous runs,
    using Machine Learning methods, to accelerate the solution of new (yet unseen) instances.
    """

    def __init__(self,
-                 threads = 4,
-                 ws_predictor = None):
-        self.parent_solver = pe.SolverFactory('cplex_persistent')
+                 threads=4,
+                 parent_solver=pe.SolverFactory('cbc')):
+        self.parent_solver = parent_solver
        self.parent_solver.options["threads"] = threads
-        self.train_x = None
-        self.train_y = None
-        self.ws_predictor = ws_predictor
+        self.x_train = {}
+        self.y_train = {}
+        self.ws_predictors = {}

-    def solve(self,
-              instance,
-              tee=False,
-              learn=True):
-        model = instance.to_model()
-        self.parent_solver.set_instance(model)
-        self.cplex = self.parent_solver._solver_model
-        x = self._get_features(instance)
-        if self.ws_predictor is not None:
-            self.cplex.MIP_starts.delete()
-            ws = self.ws_predictor.predict(x)
-            if ws is not None:
-                _add_warm_start(self.cplex, ws)
-        self.parent_solver.solve(tee=tee)
-        solution = np.array(self.cplex.solution.get_values())
-        y = np.transpose(np.vstack((solution, 1 - solution)))
-        self._update_training_set(x, y)
-        return y
-
-    def transform(self, instance):
-        model = instance.to_model()
-        self.parent_solver.set_instance(model)
-        self.cplex = self.parent_solver._solver_model
-        return self._get_features(instance)
-
-    def predict(self, instance):
-        pass
-
-    def _update_training_set(self, x, y):
-        if self.train_x is None:
-            self.train_x = x
-            self.train_y = y
-        else:
-            self.train_x = np.vstack((self.train_x, x))
-            self.train_y = np.vstack((self.train_y, y))
-
-    def fit(self):
-        if self.ws_predictor is not None:
-            self.ws_predictor.fit(self.train_x, self.train_y)
-
-def _add_warm_start(cplex, ws):
-    assert isinstance(ws, np.ndarray)
-    assert ws.shape == (cplex.variables.get_num(),)
-    indices, values = [], []
-    for k in range(len(ws)):
-        if isfinite(ws[k]):
-            indices += [k]
-            values += [ws[k]]
-    print("Adding warm start with %d values" % len(indices))
-    cplex.MIP_starts.add([indices, values], cplex.MIP_starts.effort_level.solve_MIP)
-
-def _solve(self, tee):
-    self.parent_solver.solve(tee=tee)
+    def solve(self, instance, tee=False):
+        # Convert instance into concrete model
+        model = instance.to_model()
+
+        # Split decision variables according to their category
+        transformer = PerVariableTransformer()
+        var_split = transformer.split_variables(instance, model)
+
+        # Build x_test and update x_train
+        x_test = {}
+        for category in var_split.keys():
+            var_index_pairs = var_split[category]
+            x = transformer.transform_instance(instance, var_index_pairs)
+            x_test[category] = x
+            if category not in self.x_train.keys():
+                self.x_train[category] = x
+            else:
+                self.x_train[category] = np.vstack([self.x_train[category], x])
+
+        # Predict warm start
+        for category in var_split.keys():
+            if category in self.ws_predictors.keys():
+                var_index_pairs = var_split[category]
+                ws = self.ws_predictors[category].predict(x_test[category])
+                assert ws.shape == (len(var_index_pairs), 2)
+                for i in range(len(var_index_pairs)):
+                    var, index = var_index_pairs[i]
+                    if ws[i,0] == 1:
+                        var[index].value = 1
+                    elif ws[i,1] == 1:
+                        var[index].value = 0
+
+        # Solve MILP
+        self.parent_solver.solve(model, tee=tee, warmstart=True)
+
+        # Update y_train
+        for category in var_split.keys():
+            var_index_pairs = var_split[category]
+            y = transformer.transform_solution(var_index_pairs)
+            if category not in self.y_train.keys():
+                self.y_train[category] = y
+            else:
+                self.y_train[category] = np.vstack([self.y_train[category], y])
+
+    def fit(self, x_train_dict=None, y_train_dict=None):
+        if x_train_dict is None:
+            x_train_dict = self.x_train
+            y_train_dict = self.y_train
+        for category in x_train_dict.keys():
+            x_train = x_train_dict[category]
+            y_train = y_train_dict[category]
+            self.ws_predictors[category] = WarmStartPredictor()
+            self.ws_predictors[category].fit(x_train, y_train)
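
Not part of the diff, but worth noting: the constructor now receives the wrapped Pyomo solver as an argument, so the CPLEX-specific persistent interface is gone and any installed solver can be plugged in. A minimal sketch, assuming Cbc is installed:

import pyomo.environ as pe
from miplearn import LearningSolver

# Default parent solver is Cbc; any Pyomo solver object may be passed instead.
solver = LearningSolver(parent_solver=pe.SolverFactory('cbc'), threads=1)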

@ -0,0 +1,16 @@
# MIPLearn: A Machine-Learning Framework for Mixed-Integer Optimization
# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
# Written by Alinson S. Xavier <axavier@anl.gov>
from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance2


def test_solver():
    instance = KnapsackInstance2(weights=[23., 26., 20., 18.],
                                 prices=[505., 352., 458., 220.],
                                 capacity=67.)
    solver = LearningSolver()
    solver.solve(instance)
    solver.fit()
    solver.solve(instance)
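
As an illustration of what this new test exercises (again not part of the commit, and assuming Cbc is installed): after the first solve(), training data is accumulated per variable category, and KnapsackInstance2 assigns each variable its own category:

from miplearn import LearningSolver
from miplearn.problems.knapsack import KnapsackInstance2

instance = KnapsackInstance2(weights=[23., 26., 20., 18.],
                             prices=[505., 352., 458., 220.],
                             capacity=67.)
solver = LearningSolver()
solver.solve(instance)
print(sorted(solver.x_train.keys()))   # [0, 1, 2, 3] -- one category per variable
print(solver.y_train[0])               # [[0. 1.]] -- item 0 takes value 1 in the optimum
solver.fit()                           # trains one WarmStartPredictor per category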

@ -2,19 +2,22 @@
# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
# Written by Alinson S. Xavier <axavier@anl.gov>
-from miplearn import Instance, LearningSolver
from miplearn.transformers import PerVariableTransformer
from miplearn.problems.knapsack import KnapsackInstance, KnapsackInstance2
import numpy as np
import pyomo.environ as pe


def test_transform():
    transformer = PerVariableTransformer()
    instance = KnapsackInstance(weights=[23., 26., 20., 18.],
                                prices=[505., 352., 458., 220.],
                                capacity=67.)
    model = instance.to_model()
+    solver = pe.SolverFactory('cbc')
+    solver.options["threads"] = 1
+    solver.solve(model)
    var_split = transformer.split_variables(instance, model)
    var_split_expected = {
        "default": [

@ -26,7 +29,7 @@ def test_transform():
    }
    assert var_split == var_split_expected
    var_index_pairs = [(model.x, i) for i in range(4)]
    x_actual = transformer.transform_instance(instance, var_index_pairs)
    x_expected = np.array([
        [67., 21.75, 23., 505.],

@ -34,24 +37,29 @@ def test_transform():
        [67., 21.75, 20., 458.],
        [67., 21.75, 18., 220.],
    ])
-    assert x_expected.tolist() == x_actual.tolist()
-    solver = pe.SolverFactory('cplex')
-    solver.options["threads"] = 1
+    assert x_expected.tolist() == np.round(x_actual, decimals=2).tolist()
    solver.solve(model)
    y_actual = transformer.transform_solution(var_index_pairs)
-    y_expected = np.array([1., 0., 1., 1.])
+    y_expected = np.array([
+        [0., 1.],
+        [1., 0.],
+        [0., 1.],
+        [0., 1.],
+    ])
    assert y_actual.tolist() == y_expected.tolist()


def test_transform_with_categories():
    transformer = PerVariableTransformer()
    instance = KnapsackInstance2(weights=[23., 26., 20., 18.],
                                 prices=[505., 352., 458., 220.],
                                 capacity=67.)
    model = instance.to_model()
+    solver = pe.SolverFactory('cbc')
+    solver.options["threads"] = 1
+    solver.solve(model)
    var_split = transformer.split_variables(instance, model)
    var_split_expected = {
        0: [(model.x, 0)],

@ -63,13 +71,13 @@ def test_transform_with_categories():
    var_index_pairs = var_split[0]
    x_actual = transformer.transform_instance(instance, var_index_pairs)
-    x_expected = np.array([[23., 26., 20., 18., 505., 352., 458., 220.]])
-    assert x_expected.tolist() == x_actual.tolist()
-    solver = pe.SolverFactory('cplex')
-    solver.options["threads"] = 1
+    x_expected = np.array([
+        [23., 26., 20., 18., 505., 352., 458., 220.]
+    ])
+    assert x_expected.tolist() == np.round(x_actual, decimals=2).tolist()
    solver.solve(model)
    y_actual = transformer.transform_solution(var_index_pairs)
-    y_expected = np.array([1.])
+    y_expected = np.array([[0., 1.]])
    assert y_actual.tolist() == y_expected.tolist()

@ -5,14 +5,16 @@
import numpy as np
from pyomo.core import Var


class PerVariableTransformer:
    """
    Class that converts a miplearn.Instance into a matrix of features that is suitable
    for training machine learning models that make one decision per decision variable.
    """

    def __init__(self):
        pass

    def transform_instance(self, instance, var_index_pairs):
        instance_features = self._get_instance_features(instance)
        variable_features = self._get_variable_features(instance, var_index_pairs)

@ -20,13 +22,15 @@ class PerVariableTransformer:
            np.hstack([instance_features, vf])
            for vf in variable_features
        ])

-    def _get_instance_features(self, instance):
+    @staticmethod
+    def _get_instance_features(instance):
        features = instance.get_instance_features()
        assert isinstance(features, np.ndarray)
        return features

-    def _get_variable_features(self, instance, var_index_pairs):
+    @staticmethod
+    def _get_variable_features(instance, var_index_pairs):
        features = []
        expected_shape = None
        for (var, index) in var_index_pairs:

@ -39,19 +43,21 @@ class PerVariableTransformer:
            assert vf.shape == expected_shape
            features += [vf]
        return np.array(features)

-    def transform_solution(self, var_index_pairs):
+    @staticmethod
+    def transform_solution(var_index_pairs):
        y = []
        for (var, index) in var_index_pairs:
-            y += [var[index].value]
+            y += [[1 - var[index].value, var[index].value]]
        return np.array(y)

-    def split_variables(self, instance, model):
+    @staticmethod
+    def split_variables(instance, model):
        result = {}
        for var in model.component_objects(Var):
            for index in var:
                category = instance.get_variable_category(var, index)
                if category not in result.keys():
                    result[category] = []
-                result[category] += [(var,index)]
+                result[category] += [(var, index)]
        return result
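
The most consequential change here is the label encoding: transform_solution used to return one 0/1 entry per variable and now returns a two-column one-hot row per variable, which is the format WarmStartPredictor consumes. A small before/after sketch (illustration only), using the optimal knapsack solution x = (1, 0, 1, 1) from the tests:

import numpy as np

# Old encoding: one value per variable.
y_old = np.array([1., 0., 1., 1.])

# New encoding: column 0 flags "variable is 0", column 1 flags "variable is 1".
y_new = np.array([[0., 1.],
                  [1., 0.],
                  [0., 1.],
                  [0., 1.]])
assert (y_new[:, 1] == y_old).all()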

@ -2,28 +2,42 @@
# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
# Written by Alinson S. Xavier <axavier@anl.gov>
-import tensorflow as tf
-import tensorflow.keras as keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
import numpy as np
+from sklearn.pipeline import make_pipeline
+from sklearn.linear_model import LogisticRegression
+from sklearn.preprocessing import StandardScaler


class WarmStartPredictor:
-    def __init__(self, model=None, threshold=0.80):
-        self.model = model
-        self.threshold = threshold
-
-    def fit(self, train_x, train_y):
-        pass
-
-    def predict(self, x):
-        if self.model is None: return None
-        assert isinstance(x, np.ndarray)
-        y = self.model.predict(x)
-        n_vars = y.shape[0]
-        ws = np.array([float("nan")] * n_vars)
-        ws[y[:,0] > self.threshold] = 1.0
-        ws[y[:,1] > self.threshold] = 0.0
-        return ws
+    def __init__(self,
+                 thr_fix_zero=0.05,
+                 thr_fix_one=0.95,
+                 thr_predict=0.95):
+        self.model = None
+        self.thr_predict = thr_predict
+        self.thr_fix_zero = thr_fix_zero
+        self.thr_fix_one = thr_fix_one
+
+    def fit(self, x_train, y_train):
+        assert isinstance(x_train, np.ndarray)
+        assert isinstance(y_train, np.ndarray)
+        assert y_train.shape[1] == 2
+        assert y_train.shape[0] == x_train.shape[0]
+        y_hat = np.average(y_train[:, 1])
+        if y_hat < self.thr_fix_zero or y_hat > self.thr_fix_one:
+            self.model = int(y_hat)
+        else:
+            self.model = make_pipeline(StandardScaler(), LogisticRegression())
+            self.model.fit(x_train, y_train[:, 1].astype(int))
+
+    def predict(self, x_test):
+        assert isinstance(x_test, np.ndarray)
+        if isinstance(self.model, int):
+            p_test = np.array([[1 - self.model, self.model]
+                               for _ in range(x_test.shape[0])])
+        else:
+            p_test = self.model.predict_proba(x_test)
+        p_test[p_test < self.thr_predict] = 0
+        p_test[p_test > 0] = 1
+        p_test = p_test.astype(int)
+        return p_test
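
A small sketch of how the rewritten predictor behaves (not part of the commit; the toy arrays below are made up). When nearly all training labels agree, fit() skips scikit-learn entirely and stores a constant 0/1 prediction; otherwise it fits a scaled logistic regression, and predict() reports a one-hot row only where the predicted probability clears thr_predict:

import numpy as np
from miplearn.warmstart import WarmStartPredictor

x_train = np.array([[1., 0.], [2., 0.], [3., 1.], [4., 1.]])
y_train = np.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])  # two-column encoding

clf = WarmStartPredictor()
clf.fit(x_train, y_train)                  # mean(y[:, 1]) = 0.5, so a logistic model is trained
print(clf.predict(np.array([[0., 0.]])))   # one-hot row, or [[0 0]] if not confident enough

always_one = WarmStartPredictor()
always_one.fit(x_train, np.array([[0., 1.]] * 4))   # all labels are 1 -> constant model
print(always_one.predict(np.array([[0., 0.]])))     # [[0 1]]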

@ -0,0 +1,4 @@
pyomo
numpy
pytest
sklearn

@ -7,5 +7,5 @@ setup(
    author='Alinson S. Xavier',
    author_email='axavier@anl.gov',
    packages=['miplearn'],
-    install_requires=['pyomo'],
+    install_requires=['pyomo', 'numpy', 'sklearn'],
)