Temporarily remove unused files; make package work with Cbc

pull/1/head
Alinson S. Xavier 6 years ago
parent ef14f42d01
commit f538356bf6

.gitignore

@@ -1,3 +1,5 @@
+TODO.md
+*.bin
 *$py.class
 *.cover
 *.egg

@@ -4,13 +4,14 @@
 from abc import ABC, abstractmethod
 
 class Instance(ABC):
     """
     Abstract class holding all the data necessary to generate a concrete model of the problem.
     In the knapsack problem, for example, this class could hold the number of items, their weights
     and costs, as well as the size of the knapsack. Objects implementing this class are able to
-    convert themselves into a concrete optimization model, which can be optimized by solver, or
+    convert themselves into a concrete optimization model, which can be optimized by a solver, or
     into arrays of features, which can be provided as inputs to machine learning models.
     """

@@ -6,6 +6,7 @@ import miplearn
 import numpy as np
 import pyomo.environ as pe
 
 class KnapsackInstance(miplearn.Instance):
     def __init__(self, weights, prices, capacity):
         self.weights = weights
@@ -13,15 +14,14 @@ class KnapsackInstance(miplearn.Instance):
         self.capacity = capacity
 
     def to_model(self):
-        model = m = pe.ConcreteModel()
+        model = pe.ConcreteModel()
         items = range(len(self.weights))
-        m.x = pe.Var(items, domain=pe.Binary)
-        m.OBJ = pe.Objective(rule=lambda m : sum(m.x[v] * self.prices[v] for v in items),
+        model.x = pe.Var(items, domain=pe.Binary)
+        model.OBJ = pe.Objective(rule=lambda m: sum(m.x[v] * self.prices[v] for v in items),
                              sense=pe.maximize)
-        m.eq_capacity = pe.Constraint(rule = lambda m :
-                sum(m.x[v] * self.weights[v]
+        model.eq_capacity = pe.Constraint(rule=lambda m: sum(m.x[v] * self.weights[v]
                                                          for v in items) <= self.capacity)
-        return m
+        return model
 
     def get_instance_features(self):
         return np.array([
@@ -35,11 +35,13 @@ class KnapsackInstance(miplearn.Instance):
             self.prices[index],
         ])
 
 class KnapsackInstance2(KnapsackInstance):
     """
     Alternative implementation of the Knapsack Problem, which assigns a different category for each
     decision variable, and therefore trains one machine learning model per variable.
     """
     def get_instance_features(self):
         return np.hstack([self.weights, self.prices])
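The override that actually produces one category per decision variable is not visible in this hunk. A hypothetical sketch of how KnapsackInstance2 could do it, consistent with the tests further down (var_split[0] holds a single variable, and each feature row is just the hstack of weights and prices):

    import numpy as np

    class KnapsackInstance2(KnapsackInstance):
        def get_variable_category(self, var, index):
            # One category per item index, so split_variables() yields one
            # (var, index) group, and fit() trains one predictor, per variable.
            return index

        def get_variable_features(self, var, index):
            # With per-variable models the per-variable features can be empty; the
            # instance features (all weights and prices) already describe the item set.
            return np.array([])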

@@ -1,60 +0,0 @@
-# MIPLearn: A Machine-Learning Framework for Mixed-Integer Optimization
-# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
-# Written by Alinson S. Xavier <axavier@anl.gov>
-import numpy as np
-import pyomo.environ as pe
-import networkx as nx
-from miplearn import Instance
-import random
-
-class MaxStableSetGenerator:
-    def __init__(self, sizes=[50], densities=[0.1]):
-        self.sizes = sizes
-        self.densities = densities
-
-    def generate(self):
-        size = random.choice(self.sizes)
-        density = random.choice(self.densities)
-        self.graph = nx.generators.random_graphs.binomial_graph(size, density)
-        weights = np.ones(self.graph.number_of_nodes())
-        return MaxStableSetInstance(self.graph, weights)
-
-class MaxStableSetInstance(Instance):
-    def __init__(self, graph, weights):
-        self.graph = graph
-        self.weights = weights
-
-    def to_model(self):
-        nodes = list(self.graph.nodes)
-        edges = list(self.graph.edges)
-        model = m = pe.ConcreteModel()
-        m.x = pe.Var(nodes, domain=pe.Binary)
-        m.OBJ = pe.Objective(rule=lambda m : sum(m.x[v] * self.weights[v] for v in nodes),
-                             sense=pe.maximize)
-        m.edge_eqs = pe.ConstraintList()
-        for edge in edges:
-            m.edge_eqs.add(m.x[edge[0]] + m.x[edge[1]] <= 1)
-        return m
-
-    def get_instance_features(self):
-        return np.array([
-            self.graph.number_of_nodes(),
-            self.graph.number_of_edges(),
-        ])
-
-    def get_variable_features(self, var, index):
-        first_neighbors = list(self.graph.neighbors(index))
-        second_neighbors = [list(self.graph.neighbors(u)) for u in first_neighbors]
-        degree = len(first_neighbors)
-        neighbor_degrees = sorted([len(nn) for nn in second_neighbors])
-        neighbor_degrees = neighbor_degrees + [100.] * 10
-        return np.array([
-            degree,
-            neighbor_degrees[0] - degree,
-            neighbor_degrees[1] - degree,
-            neighbor_degrees[2] - degree,
-        ])

@@ -2,10 +2,12 @@
 # Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
 # Written by Alinson S. Xavier <axavier@anl.gov>
-from .warmstart import *
+# from .warmstart import WarmStartPredictor
+from .transformers import PerVariableTransformer
+from .warmstart import WarmStartPredictor
 import pyomo.environ as pe
 import numpy as np
-from math import isfinite
 
 class LearningSolver:
     """
@@ -14,65 +16,67 @@ class LearningSolver:
     """
     def __init__(self,
-                 threads = 4,
-                 ws_predictor = None):
-        self.parent_solver = pe.SolverFactory('cplex_persistent')
+                 threads=4,
+                 parent_solver=pe.SolverFactory('cbc')):
+        self.parent_solver = parent_solver
         self.parent_solver.options["threads"] = threads
-        self.train_x = None
-        self.train_y = None
-        self.ws_predictor = ws_predictor
+        self.x_train = {}
+        self.y_train = {}
+        self.ws_predictors = {}
 
-    def solve(self,
-              instance,
-              tee=False,
-              learn=True):
-        model = instance.to_model()
-        self.parent_solver.set_instance(model)
-        self.cplex = self.parent_solver._solver_model
-        x = self._get_features(instance)
-        if self.ws_predictor is not None:
-            self.cplex.MIP_starts.delete()
-            ws = self.ws_predictor.predict(x)
-            if ws is not None:
-                _add_warm_start(self.cplex, ws)
-        self.parent_solver.solve(tee=tee)
-        solution = np.array(self.cplex.solution.get_values())
-        y = np.transpose(np.vstack((solution, 1 - solution)))
-        self._update_training_set(x, y)
-        return y
+    def solve(self, instance, tee=False):
+        # Convert instance into concrete model
+        model = instance.to_model()
+
+        # Split decision variables according to their category
+        transformer = PerVariableTransformer()
+        var_split = transformer.split_variables(instance, model)
+
+        # Build x_test and update x_train
+        x_test = {}
+        for category in var_split.keys():
+            var_index_pairs = var_split[category]
+            x = transformer.transform_instance(instance, var_index_pairs)
+            x_test[category] = x
+            if category not in self.x_train.keys():
+                self.x_train[category] = x
+            else:
+                self.x_train[category] = np.vstack([self.x_train[category], x])
+
+        # Predict warm start
+        for category in var_split.keys():
+            if category in self.ws_predictors.keys():
+                var_index_pairs = var_split[category]
+                ws = self.ws_predictors[category].predict(x_test[category])
+                assert ws.shape == (len(var_index_pairs), 2)
+                for i in range(len(var_index_pairs)):
+                    var, index = var_index_pairs[i]
+                    if ws[i, 0] == 1:
+                        var[index].value = 1
+                    elif ws[i, 1] == 1:
+                        var[index].value = 0
+
+        # Solve MILP
+        self.parent_solver.solve(model, tee=tee, warmstart=True)
+
+        # Update y_train
+        for category in var_split.keys():
+            var_index_pairs = var_split[category]
+            y = transformer.transform_solution(var_index_pairs)
+            if category not in self.y_train.keys():
+                self.y_train[category] = y
+            else:
+                self.y_train[category] = np.vstack([self.y_train[category], y])
 
-    def transform(self, instance):
-        model = instance.to_model()
-        self.parent_solver.set_instance(model)
-        self.cplex = self.parent_solver._solver_model
-        return self._get_features(instance)
-
-    def predict(self, instance):
-        pass
-
-    def _update_training_set(self, x, y):
-        if self.train_x is None:
-            self.train_x = x
-            self.train_y = y
-        else:
-            self.train_x = np.vstack((self.train_x, x))
-            self.train_y = np.vstack((self.train_y, y))
-
-    def fit(self):
-        if self.ws_predictor is not None:
-            self.ws_predictor.fit(self.train_x, self.train_y)
-
-def _add_warm_start(cplex, ws):
-    assert isinstance(ws, np.ndarray)
-    assert ws.shape == (cplex.variables.get_num(),)
-    indices, values = [], []
-    for k in range(len(ws)):
-        if isfinite(ws[k]):
-            indices += [k]
-            values += [ws[k]]
-    print("Adding warm start with %d values" % len(indices))
-    cplex.MIP_starts.add([indices, values], cplex.MIP_starts.effort_level.solve_MIP)
+    def fit(self, x_train_dict=None, y_train_dict=None):
+        if x_train_dict is None:
+            x_train_dict = self.x_train
+            y_train_dict = self.y_train
+        for category in x_train_dict.keys():
+            x_train = x_train_dict[category]
+            y_train = y_train_dict[category]
+            self.ws_predictors[category] = WarmStartPredictor()
+            self.ws_predictors[category].fit(x_train, y_train)
+
+    def _solve(self, tee):
+        self.parent_solver.solve(tee=tee)
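With the persistent CPLEX API gone, predicted values now reach the solver through plain Pyomo: solve() writes them into each variable's .value and then calls the solver with warmstart=True, which the Cbc interface turns into a MIP start. A minimal standalone sketch of that mechanism (toy model, illustrative names only):

    import pyomo.environ as pe

    # Tiny model standing in for instance.to_model()
    model = pe.ConcreteModel()
    model.x = pe.Var([0, 1], domain=pe.Binary)
    model.OBJ = pe.Objective(expr=model.x[0] + 2 * model.x[1], sense=pe.maximize)
    model.cap = pe.Constraint(expr=model.x[0] + model.x[1] <= 1)

    # What the "Predict warm start" loop does: write predicted values into the variables.
    model.x[0].value = 0
    model.x[1].value = 1

    # What the "Solve MILP" step does: hand those values to Cbc as a starting solution.
    pe.SolverFactory('cbc').solve(model, warmstart=True)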

@@ -0,0 +1,16 @@
+# MIPLearn: A Machine-Learning Framework for Mixed-Integer Optimization
+# Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
+# Written by Alinson S. Xavier <axavier@anl.gov>
+from miplearn import LearningSolver
+from miplearn.problems.knapsack import KnapsackInstance2
+
+def test_solver():
+    instance = KnapsackInstance2(weights=[23., 26., 20., 18.],
+                                 prices=[505., 352., 458., 220.],
+                                 capacity=67.)
+    solver = LearningSolver()
+    solver.solve(instance)
+    solver.fit()
+    solver.solve(instance)

@@ -2,18 +2,21 @@
 # Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
 # Written by Alinson S. Xavier <axavier@anl.gov>
 from miplearn import Instance, LearningSolver
 from miplearn.transformers import PerVariableTransformer
 from miplearn.problems.knapsack import KnapsackInstance, KnapsackInstance2
 import numpy as np
 import pyomo.environ as pe
 
 def test_transform():
     transformer = PerVariableTransformer()
     instance = KnapsackInstance(weights=[23., 26., 20., 18.],
                                 prices=[505., 352., 458., 220.],
                                 capacity=67.)
     model = instance.to_model()
+    solver = pe.SolverFactory('cbc')
+    solver.options["threads"] = 1
+    solver.solve(model)
     var_split = transformer.split_variables(instance, model)
     var_split_expected = {
@@ -34,14 +37,16 @@ def test_transform():
         [67., 21.75, 20., 458.],
         [67., 21.75, 18., 220.],
     ])
-    assert x_expected.tolist() == x_actual.tolist()
+    assert x_expected.tolist() == np.round(x_actual, decimals=2).tolist()
 
-    solver = pe.SolverFactory('cplex')
-    solver.options["threads"] = 1
-    solver.solve(model)
     y_actual = transformer.transform_solution(var_index_pairs)
-    y_expected = np.array([1., 0., 1., 1.])
+    y_expected = np.array([
+        [0., 1.],
+        [1., 0.],
+        [0., 1.],
+        [0., 1.],
+    ])
     assert y_actual.tolist() == y_expected.tolist()
@@ -51,6 +56,9 @@ def test_transform_with_categories():
                                 prices=[505., 352., 458., 220.],
                                 capacity=67.)
     model = instance.to_model()
+    solver = pe.SolverFactory('cbc')
+    solver.options["threads"] = 1
+    solver.solve(model)
     var_split = transformer.split_variables(instance, model)
     var_split_expected = {
@@ -63,13 +71,13 @@ def test_transform_with_categories():
     var_index_pairs = var_split[0]
     x_actual = transformer.transform_instance(instance, var_index_pairs)
-    x_expected = np.array([[23., 26., 20., 18., 505., 352., 458., 220.]])
-    assert x_expected.tolist() == x_actual.tolist()
+    x_expected = np.array([
+        [23., 26., 20., 18., 505., 352., 458., 220.]
+    ])
+    assert x_expected.tolist() == np.round(x_actual, decimals=2).tolist()
 
-    solver = pe.SolverFactory('cplex')
-    solver.options["threads"] = 1
-    solver.solve(model)
     y_actual = transformer.transform_solution(var_index_pairs)
-    y_expected = np.array([1.])
+    y_expected = np.array([[0., 1.]])
     assert y_actual.tolist() == y_expected.tolist()

@@ -5,11 +5,13 @@
 import numpy as np
 from pyomo.core import Var
 
 class PerVariableTransformer:
     """
     Class that converts a miplearn.Instance into a matrix of features that is suitable
     for training machine learning models that make one decision per decision variable.
     """
     def __init__(self):
         pass
@@ -21,12 +23,14 @@ class PerVariableTransformer:
             for vf in variable_features
         ])
 
-    def _get_instance_features(self, instance):
+    @staticmethod
+    def _get_instance_features(instance):
         features = instance.get_instance_features()
         assert isinstance(features, np.ndarray)
         return features
 
-    def _get_variable_features(self, instance, var_index_pairs):
+    @staticmethod
+    def _get_variable_features(instance, var_index_pairs):
         features = []
         expected_shape = None
         for (var, index) in var_index_pairs:
@@ -40,18 +44,20 @@ class PerVariableTransformer:
             features += [vf]
         return np.array(features)
 
-    def transform_solution(self, var_index_pairs):
+    @staticmethod
+    def transform_solution(var_index_pairs):
         y = []
         for (var, index) in var_index_pairs:
-            y += [var[index].value]
+            y += [[1 - var[index].value, var[index].value]]
         return np.array(y)
 
-    def split_variables(self, instance, model):
+    @staticmethod
+    def split_variables(instance, model):
         result = {}
         for var in model.component_objects(Var):
             for index in var:
                 category = instance.get_variable_category(var, index)
                 if category not in result.keys():
                     result[category] = []
-                result[category] += [(var,index)]
+                result[category] += [(var, index)]
         return result
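Tying the transformer to the knapsack example from the tests above, a sketch of the end-to-end data it produces (the commented values correspond to the expected arrays asserted in test_transform_with_categories):

    import pyomo.environ as pe
    from miplearn.problems.knapsack import KnapsackInstance2
    from miplearn.transformers import PerVariableTransformer

    instance = KnapsackInstance2(weights=[23., 26., 20., 18.],
                                 prices=[505., 352., 458., 220.],
                                 capacity=67.)
    model = instance.to_model()
    pe.SolverFactory('cbc').solve(model)

    transformer = PerVariableTransformer()
    var_split = transformer.split_variables(instance, model)  # {category: [(var, index), ...]}
    pairs = var_split[0]                                       # category 0 holds a single variable
    x_rows = transformer.transform_instance(instance, pairs)  # [[23., 26., 20., 18., 505., 352., 458., 220.]]
    y_rows = transformer.transform_solution(pairs)             # [[0., 1.]] -> model.x[0] takes value 1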

@@ -2,28 +2,42 @@
 # Copyright (C) 2019-2020 Argonne National Laboratory. All rights reserved.
 # Written by Alinson S. Xavier <axavier@anl.gov>
-import tensorflow as tf
-import tensorflow.keras as keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
 import numpy as np
+from sklearn.pipeline import make_pipeline
+from sklearn.linear_model import LogisticRegression
+from sklearn.preprocessing import StandardScaler
 
-class WarmStartPredictor:
-    def __init__(self, model=None, threshold=0.80):
-        self.model = model
-        self.threshold = threshold
-
-    def fit(self, train_x, train_y):
-        pass
-
-    def predict(self, x):
-        if self.model is None: return None
-        assert isinstance(x, np.ndarray)
-        y = self.model.predict(x)
-        n_vars = y.shape[0]
-        ws = np.array([float("nan")] * n_vars)
-        ws[y[:,0] > self.threshold] = 1.0
-        ws[y[:,1] > self.threshold] = 0.0
-        return ws
+class WarmStartPredictor:
+    def __init__(self,
+                 thr_fix_zero=0.05,
+                 thr_fix_one=0.95,
+                 thr_predict=0.95):
+        self.model = None
+        self.thr_predict = thr_predict
+        self.thr_fix_zero = thr_fix_zero
+        self.thr_fix_one = thr_fix_one
+
+    def fit(self, x_train, y_train):
+        assert isinstance(x_train, np.ndarray)
+        assert isinstance(y_train, np.ndarray)
+        assert y_train.shape[1] == 2
+        assert y_train.shape[0] == x_train.shape[0]
+        y_hat = np.average(y_train[:, 1])
+        if y_hat < self.thr_fix_zero or y_hat > self.thr_fix_one:
+            self.model = int(y_hat)
+        else:
+            self.model = make_pipeline(StandardScaler(), LogisticRegression())
+            self.model.fit(x_train, y_train[:, 1].astype(int))
+
+    def predict(self, x_test):
+        assert isinstance(x_test, np.ndarray)
+        if isinstance(self.model, int):
+            p_test = np.array([[1 - self.model, self.model]
+                               for _ in range(x_test.shape[0])])
+        else:
+            p_test = self.model.predict_proba(x_test)
+        p_test[p_test < self.thr_predict] = 0
+        p_test[p_test > 0] = 1
+        p_test = p_test.astype(int)
+        return p_test
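The new predictor has two regimes: when the training labels are almost all zero or almost all one (thresholds thr_fix_zero and thr_fix_one), the "model" collapses to that constant; otherwise it fits a StandardScaler plus LogisticRegression pipeline, and predict() emits a one-hot row only when predict_proba clears thr_predict, returning [0, 0] (do not fix this variable) otherwise. A toy sketch of both regimes (synthetic data, illustrative only):

    import numpy as np
    from miplearn.warmstart import WarmStartPredictor

    x = np.random.rand(100, 5)

    # Regime 1: labels are always 1 -> the model is just the constant int 1,
    # and predict() returns [0, 1] for every row.
    y_always_one = np.column_stack([np.zeros(100), np.ones(100)])
    p1 = WarmStartPredictor()
    p1.fit(x, y_always_one)
    assert isinstance(p1.model, int)
    print(p1.predict(x[:3]))   # [[0 1] [0 1] [0 1]]

    # Regime 2: mixed labels -> scaler + logistic regression; rows whose predicted
    # probability stays below thr_predict (0.95 by default) come back as [0 0].
    y_mixed = np.column_stack([x[:, 0] <= 0.5, x[:, 0] > 0.5]).astype(float)
    p2 = WarmStartPredictor()
    p2.fit(x, y_mixed)
    print(p2.predict(x[:3]))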

@@ -0,0 +1,4 @@
+pyomo
+numpy
+pytest
+sklearn

@@ -7,5 +7,5 @@ setup(
     author='Alinson S. Xavier',
     author_email='axavier@anl.gov',
     packages=['miplearn'],
-    install_requires=['pyomo'],
+    install_requires=['pyomo', 'numpy', 'sklearn'],
 )