From 372d6eb0661e13d3fe782bcc5619e579423f12e4 Mon Sep 17 00:00:00 2001
From: "Alinson S. Xavier"
Date: Thu, 21 Jan 2021 08:29:38 -0600
Subject: [PATCH] Instance: Reformat comments

---
 miplearn/instance.py | 108 +++++++++++++++++++++++--------------------
 1 file changed, 58 insertions(+), 50 deletions(-)

diff --git a/miplearn/instance.py b/miplearn/instance.py
index b00c117..e1b657d 100644
--- a/miplearn/instance.py
+++ b/miplearn/instance.py
@@ -14,12 +14,14 @@ from miplearn.types import TrainingSample

 class Instance(ABC):
     """
-    Abstract class holding all the data necessary to generate a concrete model of the problem.
-
-    In the knapsack problem, for example, this class could hold the number of items, their weights
-    and costs, as well as the size of the knapsack. Objects implementing this class are able to
-    convert themselves into a concrete optimization model, which can be optimized by a solver, or
-    into arrays of features, which can be provided as inputs to machine learning models.
+    Abstract class holding all the data necessary to generate a concrete model of the
+    problem.
+
+    In the knapsack problem, for example, this class could hold the number of items,
+    their weights and costs, as well as the size of the knapsack. Objects
+    implementing this class are able to convert themselves into a concrete
+    optimization model, which can be optimized by a solver, or into arrays of
+    features, which can be provided as inputs to machine learning models.
     """

     def __init__(self):
@@ -34,21 +36,23 @@ class Instance(ABC):

     def get_instance_features(self):
         """
-        Returns a 1-dimensional Numpy array of (numerical) features describing the entire instance.
+        Returns a 1-dimensional Numpy array of (numerical) features describing the
+        entire instance.

-        The array is used by LearningSolver to determine how similar two instances are. It may also
-        be used to predict, in combination with variable-specific features, the values of binary
-        decision variables in the problem.
+        The array is used by LearningSolver to determine how similar two instances
+        are. It may also be used to predict, in combination with variable-specific
+        features, the values of binary decision variables in the problem.

-        There is not necessarily a one-to-one correspondence between models and instance features:
-        the features may encode only part of the data necessary to generate the complete model.
-        Features may also be statistics computed from the original data. For example, in the
-        knapsack problem, an implementation may decide to provide as instance features only
-        the average weights, average prices, number of items and the size of the knapsack.
+        There is not necessarily a one-to-one correspondence between models and
+        instance features: the features may encode only part of the data necessary to
+        generate the complete model. Features may also be statistics computed from
+        the original data. For example, in the knapsack problem, an implementation
+        may decide to provide as instance features only the average weights, average
+        prices, number of items and the size of the knapsack.

-        The returned array MUST have the same length for all relevant instances of the problem. If
-        two instances map into arrays of different lengths, they cannot be solved by the same
-        LearningSolver object.
+        The returned array MUST have the same length for all relevant instances of
+        the problem. If two instances map into arrays of different lengths,
+        they cannot be solved by the same LearningSolver object.

         By default, returns [0].
         """
@@ -56,20 +60,22 @@ class Instance(ABC):

     def get_variable_features(self, var, index):
         """
-        Returns a 1-dimensional array of (numerical) features describing a particular decision
-        variable.
+        Returns a 1-dimensional array of (numerical) features describing a particular
+        decision variable.

-        The argument `var` is a pyomo.core.Var object, which represents a collection of decision
-        variables. The argument `index` specifies which variable in the collection is the relevant
-        one.
+        The argument `var` is a pyomo.core.Var object, which represents a collection
+        of decision variables. The argument `index` specifies which variable in the
+        collection is the relevant one.

-        In combination with instance features, variable features are used by LearningSolver to
-        predict, among other things, the optimal value of each decision variable before the
-        optimization takes place. In the knapsack problem, for example, an implementation could
-        provide as variable features the weight and the price of a specific item.
+        In combination with instance features, variable features are used by
+        LearningSolver to predict, among other things, the optimal value of each
+        decision variable before the optimization takes place. In the knapsack
+        problem, for example, an implementation could provide as variable features
+        the weight and the price of a specific item.

-        Like instance features, the arrays returned by this method MUST have the same length for
-        all variables within the same category, for all relevant instances of the problem.
+        Like instance features, the arrays returned by this method MUST have the same
+        length for all variables within the same category, for all relevant instances
+        of the problem.

         By default, returns [0].
         """
@@ -77,12 +83,12 @@ class Instance(ABC):

     def get_variable_category(self, var, index):
         """
-        Returns the category (a string, an integer or any hashable type) for each decision
-        variable.
+        Returns the category (a string, an integer or any hashable type) for each
+        decision variable.

-        If two variables have the same category, LearningSolver will use the same internal ML
-        model to predict the values of both variables. If the returned category is None, ML
-        models will ignore the variable.
+        If two variables have the same category, LearningSolver will use the same
+        internal ML model to predict the values of both variables. If the returned
+        category is None, ML models will ignore the variable.

         By default, returns "default".
         """
@@ -107,16 +113,16 @@ class Instance(ABC):
         """
         Returns lazy constraint violations found for the current solution.

-        After solving a model, LearningSolver will ask the instance to identify which lazy
-        constraints are violated by the current solution. For each identified violation,
-        LearningSolver will then call the build_lazy_constraint, add the generated Pyomo
-        constraint to the model, then resolve the problem. The process repeats until no further
-        lazy constraint violations are found.
+        After solving a model, LearningSolver will ask the instance to identify which
+        lazy constraints are violated by the current solution. For each identified
+        violation, LearningSolver will then call the build_lazy_constraint, add the
+        generated Pyomo constraint to the model, then resolve the problem. The
+        process repeats until no further lazy constraint violations are found.

-        Each "violation" is simply a string, a tuple or any other hashable type which allows the
-        instance to identify unambiguously which lazy constraint should be generated. In the
-        Traveling Salesman Problem, for example, a subtour violation could be a frozen set
-        containing the cities in the subtour.
+        Each "violation" is simply a string, a tuple or any other hashable type which
+        allows the instance to identify unambiguously which lazy constraint should be
+        generated. In the Traveling Salesman Problem, for example, a subtour
+        violation could be a frozen set containing the cities in the subtour.

         For a concrete example, see TravelingSalesmanInstance.
         """
@@ -126,15 +132,17 @@ class Instance(ABC):
         """
         Returns a Pyomo constraint which fixes a given violation.

-        This method is typically called immediately after find_violated_lazy_constraints. The violation object
-        provided to this method is exactly the same object returned earlier by find_violated_lazy_constraints.
-        After some training, LearningSolver may decide to proactively build some lazy constraints
-        at the beginning of the optimization process, before a solution is even available. In this
-        case, build_lazy_constraints will be called without a corresponding call to
-        find_violated_lazy_constraints.
+        This method is typically called immediately after
+        find_violated_lazy_constraints. The violation object provided to this method
+        is exactly the same object returned earlier by
+        find_violated_lazy_constraints. After some training, LearningSolver may
+        decide to proactively build some lazy constraints at the beginning of the
+        optimization process, before a solution is even available. In this case,
+        build_lazy_constraints will be called without a corresponding call to
+        find_violated_lazy_constraints.

-        The implementation should not directly add the constraint to the model. The constraint
-        will be added by LearningSolver after the method returns.
+        The implementation should not directly add the constraint to the model. The
+        constraint will be added by LearningSolver after the method returns.

         For a concrete example, see TravelingSalesmanInstance.
         """
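For reference, the interface documented above could be implemented along the following lines for the knapsack example that these docstrings use. This is only a sketch, not code from the repository: the class name, the constructor arguments (weights, prices, capacity) and the use of numpy are assumptions, and the model-building method is stubbed out because this patch does not touch that part of the class.

import numpy as np

from miplearn.instance import Instance


class KnapsackInstance(Instance):
    """Hypothetical concrete subclass illustrating the documented interface."""

    def __init__(self, weights, prices, capacity):
        super().__init__()
        self.weights = weights
        self.prices = prices
        self.capacity = capacity

    def to_model(self):
        # Construction of the concrete (Pyomo) optimization model is omitted;
        # the name to_model is an assumption, since the patch above does not
        # show the model-building side of the class.
        raise NotImplementedError

    def get_instance_features(self):
        # Aggregate statistics, as suggested by the docstring. The returned
        # array has the same length for every knapsack instance, as required.
        return np.array(
            [
                np.mean(self.weights),
                np.mean(self.prices),
                len(self.weights),
                self.capacity,
            ]
        )

    def get_variable_features(self, var, index):
        # `index` identifies one item; its weight and price are natural
        # variable-level features, as the docstring suggests.
        return np.array([self.weights[index], self.prices[index]])

    def get_variable_category(self, var, index):
        # A single category: one internal ML model predicts all item variables.
        return "default"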