Add type annotations to components

Branch: master
Author: Alinson S. Xavier
Parent: a98a783969
Commit: fc0835e694

@@ -5,7 +5,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: [3.7, 3.8, 3.9]
     steps:
       - name: Check out source code

@@ -4,6 +4,7 @@
 import logging
 from copy import deepcopy
+from typing import Any, Dict
 
 from sklearn.linear_model import LogisticRegression
 from sklearn.neighbors import KNeighborsClassifier
@@ -25,9 +26,9 @@ class AdaptiveClassifier(Classifier):
     def __init__(
         self,
-        candidates=None,
-        evaluator=ClassifierEvaluator(),
-    ):
+        candidates: Dict[str, Any] = None,
+        evaluator: ClassifierEvaluator = ClassifierEvaluator(),
+    ) -> None:
         """
         Initializes the meta-classifier.
         """

@@ -15,7 +15,7 @@ class CountingClassifier(Classifier):
     counts how many times each label appeared, hence the name.
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         self.mean = None
 
     def fit(self, x_train, y_train):

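The docstring above describes the behaviour informally; the sketch below shows what it implies in practice. The predict_proba call and the printed frequencies are assumptions about the interface, not taken from this diff.

import numpy as np

from miplearn.classifiers.counting import CountingClassifier

# Features are ignored; only the labels matter to a counting classifier.
clf = CountingClassifier()
clf.fit(np.zeros((3, 1)), np.array([0.0, 1.0, 1.0]))

# Assuming a scikit-learn style predict_proba, every row receives the same
# predicted distribution: the observed label frequencies (about [1/3, 2/3]).
print(clf.predict_proba(np.zeros((2, 1))))
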
@@ -6,7 +6,7 @@ from sklearn.metrics import roc_auc_score
 
 
 class ClassifierEvaluator:
-    def __init__(self):
+    def __init__(self) -> None:
         pass
 
     def evaluate(self, clf, x_train, y_train):

@@ -7,10 +7,17 @@ from abc import abstractmethod, ABC
 import numpy as np
 from sklearn.metrics._ranking import _binary_clf_curve
 
+from miplearn.classifiers import Classifier
+
 
 class DynamicThreshold(ABC):
     @abstractmethod
-    def find(self, clf, x_train, y_train):
+    def find(
+        self,
+        clf: Classifier,
+        x_train: np.ndarray,
+        y_train: np.ndarray,
+    ) -> float:
         """
         Given a trained binary classifier `clf` and a training data set,
         returns the numerical threshold (float) satisfying some criteria.
@@ -24,7 +31,7 @@ class MinPrecisionThreshold(DynamicThreshold):
     positive rate (also known as precision).
     """
 
-    def __init__(self, min_precision):
+    def __init__(self, min_precision: float) -> None:
         self.min_precision = min_precision
 
     def find(self, clf, x_train, y_train):

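For reference, a minimal concrete threshold written against the annotated find signature above. FixedThreshold is hypothetical and not part of the library; it simply ignores the classifier and training data.

import numpy as np

from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import DynamicThreshold


class FixedThreshold(DynamicThreshold):
    """Hypothetical threshold that always returns the same cutoff."""

    def __init__(self, value: float) -> None:
        self.value = value

    def find(
        self,
        clf: Classifier,
        x_train: np.ndarray,
        y_train: np.ndarray,
    ) -> float:
        # No search required: the cutoff does not depend on clf or the data.
        return self.value
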
@@ -2,8 +2,16 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
+from typing import Any, List, Union, TYPE_CHECKING
+
+from miplearn.instance import Instance
+from miplearn.types import MIPSolveStats, TrainingSample
+
+if TYPE_CHECKING:
+    from miplearn.solvers.learning import LearningSolver
 
 
 class Component(ABC):
@@ -15,18 +23,35 @@ class Component(ABC):
     strategy.
     """
 
-    def before_solve(self, solver, instance, model):
+    def before_solve(
+        self,
+        solver: LearningSolver,
+        instance: Instance,
+        model: Any,
+    ) -> None:
+        """
+        Method called by LearningSolver before the problem is solved.
+
+        Parameters
+        ----------
+        solver
+            The solver calling this method.
+        instance
+            The instance being solved.
+        model
+            The concrete optimization model being solved.
+        """
         return
 
     @abstractmethod
     def after_solve(
         self,
-        solver,
-        instance,
-        model,
-        stats,
-        training_data,
-    ):
+        solver: LearningSolver,
+        instance: Instance,
+        model: Any,
+        stats: MIPSolveStats,
+        training_data: TrainingSample,
+    ) -> None:
         """
         Method called by LearningSolver after the problem is solved to optimality.
@@ -40,19 +65,23 @@ class Component(ABC):
             The concrete optimization model being solved.
         stats: dict
             A dictionary containing statistics about the solution process, such as
-            number of nodes explored and running time. Components are free to add their own
-            statistics here. For example, PrimalSolutionComponent adds statistics regarding
-            the number of predicted variables. All statistics in this dictionary are exported
-            to the benchmark CSV file.
+            number of nodes explored and running time. Components are free to add
+            their own statistics here. For example, PrimalSolutionComponent adds
+            statistics regarding the number of predicted variables. All statistics in
+            this dictionary are exported to the benchmark CSV file.
         training_data: dict
-            A dictionary containing data that may be useful for training machine learning
-            models and accelerating the solution process. Components are free to add their
-            own training data here. For example, PrimalSolutionComponent adds the current
-            primal solution. The data must be pickable.
+            A dictionary containing data that may be useful for training machine
+            learning models and accelerating the solution process. Components are
+            free to add their own training data here. For example,
+            PrimalSolutionComponent adds the current primal solution. The data must
+            be picklable.
         """
         pass
 
-    def fit(self, training_instances):
+    def fit(
+        self,
+        training_instances: Union[List[str], List[Instance]],
+    ) -> None:
         return
 
     def iteration_cb(self, solver, instance, model):

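A sketch of a user-defined component written against the annotated callbacks above. SolveStatsPrinter is hypothetical (not part of this commit); it implements only the abstract after_solve and relies on the default before_solve and fit shown in the diff.

from __future__ import annotations

from typing import Any, TYPE_CHECKING

from miplearn.components.component import Component
from miplearn.instance import Instance
from miplearn.types import MIPSolveStats, TrainingSample

if TYPE_CHECKING:
    from miplearn.solvers.learning import LearningSolver


class SolveStatsPrinter(Component):
    """Hypothetical component that prints the solve statistics."""

    def after_solve(
        self,
        solver: LearningSolver,
        instance: Instance,
        model: Any,
        stats: MIPSolveStats,
        training_data: TrainingSample,
    ) -> None:
        # Both arguments are plain dictionaries at runtime; components may
        # read them or add their own entries, as the docstring above describes.
        print("solve statistics:", dict(stats))
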
@@ -5,10 +5,12 @@
 import logging
 import sys
 from copy import deepcopy
+from typing import Any, Dict
 
 import numpy as np
 from tqdm.auto import tqdm
 
+from miplearn.classifiers import Classifier
 from miplearn.classifiers.counting import CountingClassifier
 from miplearn.components import classifier_evaluation_dict
 from miplearn.components.component import Component
@@ -24,15 +26,12 @@ class UserCutsComponent(Component):
     def __init__(
         self,
-        classifier=CountingClassifier(),
-        threshold=0.05,
+        classifier: Classifier = CountingClassifier(),
+        threshold: float = 0.05,
     ):
-        self.violations = set()
-        self.count = {}
-        self.n_samples = 0
-        self.threshold = threshold
-        self.classifier_prototype = classifier
-        self.classifiers = {}
+        self.threshold: float = threshold
+        self.classifier_prototype: Classifier = classifier
+        self.classifiers: Dict[Any, Classifier] = {}
 
     def before_solve(self, solver, instance, model):
         instance.found_violated_user_cuts = []

@@ -5,10 +5,12 @@
 import logging
 import sys
 from copy import deepcopy
+from typing import Any, Dict
 
 import numpy as np
 from tqdm.auto import tqdm
 
+from miplearn.classifiers import Classifier
 from miplearn.classifiers.counting import CountingClassifier
 from miplearn.components import classifier_evaluation_dict
 from miplearn.components.component import Component
@@ -24,15 +26,12 @@ class DynamicLazyConstraintsComponent(Component):
     def __init__(
         self,
-        classifier=CountingClassifier(),
-        threshold=0.05,
+        classifier: Classifier = CountingClassifier(),
+        threshold: float = 0.05,
     ):
-        self.violations = set()
-        self.count = {}
-        self.n_samples = 0
-        self.threshold = threshold
-        self.classifier_prototype = classifier
-        self.classifiers = {}
+        self.threshold: float = threshold
+        self.classifier_prototype: Classifier = classifier
+        self.classifiers: Dict[Any, Classifier] = {}
 
     def before_solve(self, solver, instance, model):
         instance.found_violated_lazy_constraints = []

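UserCutsComponent above and DynamicLazyConstraintsComponent here now share the same annotated constructor; a brief usage sketch (the 0.10 threshold is an arbitrary illustration):

from miplearn.classifiers.counting import CountingClassifier
from miplearn.components.cuts import UserCutsComponent
from miplearn.components.lazy_dynamic import DynamicLazyConstraintsComponent

# Any miplearn Classifier may be supplied; CountingClassifier is the default.
lazy = DynamicLazyConstraintsComponent(
    classifier=CountingClassifier(),
    threshold=0.10,
)
cuts = UserCutsComponent(threshold=0.10)
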
@@ -15,6 +15,7 @@ from sklearn.metrics import (
     r2_score,
 )
 
+from miplearn.classifiers import Regressor
 from miplearn.components.component import Component
 from miplearn.extractors import InstanceFeaturesExtractor, ObjectiveValueExtractor
@@ -26,7 +27,10 @@ class ObjectiveValueComponent(Component):
     A Component which predicts the optimal objective value of the problem.
     """
 
-    def __init__(self, regressor=LinearRegression()):
+    def __init__(
+        self,
+        regressor: Regressor = LinearRegression(),
+    ) -> None:
         self.ub_regressor = None
         self.lb_regressor = None
         self.regressor_prototype = regressor

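The regressor parameter is now annotated as Regressor but defaults to scikit-learn's LinearRegression, so a compatible scikit-learn regressor can be swapped in; the KNeighborsRegressor below is purely illustrative.

from sklearn.neighbors import KNeighborsRegressor

from miplearn.components.objective import ObjectiveValueComponent

# The argument acts as a prototype for the separate upper- and lower-bound
# regressors (ub_regressor / lb_regressor above).
comp = ObjectiveValueComponent(regressor=KNeighborsRegressor(n_neighbors=5))
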
@@ -2,14 +2,19 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
 
 import logging
-from copy import deepcopy
 import sys
+from typing import Union, Dict, Any
 
-from .component import Component
-from ..classifiers.adaptive import AdaptiveClassifier
-from ..classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
-from ..components import classifier_evaluation_dict
-from ..extractors import *
 import numpy as np
 from tqdm.auto import tqdm
+
+from miplearn.classifiers import Classifier
+from miplearn.classifiers.adaptive import AdaptiveClassifier
+from miplearn.classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
+from miplearn.components import classifier_evaluation_dict
+from miplearn.components.component import Component
+from miplearn.extractors import VariableFeaturesExtractor, SolutionExtractor, Extractor
 
 logger = logging.getLogger(__name__)
@@ -21,13 +26,13 @@ class PrimalSolutionComponent(Component):
     def __init__(
         self,
-        classifier=AdaptiveClassifier(),
-        mode="exact",
-        threshold=MinPrecisionThreshold(0.98),
-    ):
+        classifier: Classifier = AdaptiveClassifier(),
+        mode: str = "exact",
+        threshold: Union[float, DynamicThreshold] = MinPrecisionThreshold(0.98),
+    ) -> None:
         self.mode = mode
-        self.classifiers = {}
-        self.thresholds = {}
+        self.classifiers: Dict[Any, Classifier] = {}
+        self.thresholds: Dict[Any, Union[float, DynamicThreshold]] = {}
         self.threshold_prototype = threshold
         self.classifier_prototype = classifier

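With threshold typed as Union[float, DynamicThreshold], either a fixed cutoff or a data-driven threshold can be passed. The import path miplearn.components.primal is assumed here, since the file name is not visible in this diff.

from miplearn.classifiers.threshold import MinPrecisionThreshold
from miplearn.components.primal import PrimalSolutionComponent  # path assumed

# A fixed probability cutoff...
fixed = PrimalSolutionComponent(mode="heuristic", threshold=0.99)

# ...or a threshold computed from the training data at fit time.
dynamic = PrimalSolutionComponent(threshold=MinPrecisionThreshold(0.95))
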
@@ -8,10 +8,12 @@ import os
 import pickle
 import tempfile
 from copy import deepcopy
-from typing import Optional, List, Any, IO, cast, BinaryIO, Union
+from typing import Optional, List, Any, IO, cast, BinaryIO, Union, Callable, Dict
 
 from p_tqdm import p_map
 
+from miplearn.solvers.internal import InternalSolver
+from miplearn.components.component import Component
 from miplearn.components.cuts import UserCutsComponent
 from miplearn.components.lazy_dynamic import DynamicLazyConstraintsComponent
 from miplearn.components.objective import ObjectiveValueComponent
@@ -80,21 +82,21 @@ class LearningSolver:
     def __init__(
         self,
-        components=None,
-        mode="exact",
-        solver=None,
-        use_lazy_cb=False,
-        solve_lp_first=True,
-        simulate_perfect=False,
+        components: List[Component] = None,
+        mode: str = "exact",
+        solver: Callable[[], InternalSolver] = None,
+        use_lazy_cb: bool = False,
+        solve_lp_first: bool = True,
+        simulate_perfect: bool = False,
     ):
         if solver is None:
             solver = GurobiPyomoSolver
         assert callable(solver), f"Callable expected. Found {solver.__class__} instead."
-        self.components = {}
+        self.components: Dict[str, Component] = {}
         self.mode = mode
-        self.internal_solver = None
-        self.solver_factory = solver
+        self.internal_solver: Optional[InternalSolver] = None
+        self.solver_factory: Callable[[], InternalSolver] = solver
         self.use_lazy_cb = use_lazy_cb
         self.tee = False
         self.solve_lp_first = solve_lp_first
@@ -105,13 +107,11 @@ class LearningSolver:
                 self._add_component(comp)
         else:
             self._add_component(ObjectiveValueComponent())
-            self._add_component(PrimalSolutionComponent())
+            self._add_component(PrimalSolutionComponent(mode=mode))
             self._add_component(DynamicLazyConstraintsComponent())
             self._add_component(UserCutsComponent())
 
         assert self.mode in ["exact", "heuristic"]
-        for component in self.components.values():
-            component.mode = self.mode
 
     def solve(
         self,
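
The solver argument is now a zero-argument factory (Callable[[], InternalSolver]) that LearningSolver invokes inside solve() to build its internal solver. A construction sketch; the GurobiPyomoSolver import path is assumed, since only the class name appears in this diff.

from miplearn.solvers.learning import LearningSolver
from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver  # path assumed

solver = LearningSolver(
    mode="heuristic",
    solver=lambda: GurobiPyomoSolver(),  # factory called by solve()
)
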
@@ -216,18 +216,20 @@ class LearningSolver:
         # Initialize internal solver
         self.tee = tee
         self.internal_solver = self.solver_factory()
+        assert self.internal_solver is not None
+        assert isinstance(self.internal_solver, InternalSolver)
         self.internal_solver.set_instance(instance, model)
 
         # Solve linear relaxation
         if self.solve_lp_first:
             logger.info("Solving LP relaxation...")
-            stats = self.internal_solver.solve_lp(tee=tee)
+            lp_stats = self.internal_solver.solve_lp(tee=tee)
             training_sample["LP solution"] = self.internal_solver.get_solution()
-            training_sample["LP value"] = stats["Optimal value"]
-            training_sample["LP log"] = stats["Log"]
+            training_sample["LP value"] = lp_stats["Optimal value"]
+            training_sample["LP log"] = lp_stats["Log"]
         else:
             training_sample["LP solution"] = self.internal_solver.get_empty_solution()
-            training_sample["LP value"] = 0
+            training_sample["LP value"] = 0.0
 
         # Before-solve callbacks
         logger.debug("Running before_solve callbacks...")
@@ -347,7 +349,7 @@ class LearningSolver:
         for component in self.components.values():
             component.fit(training_instances)
 
-    def _add_component(self, component):
+    def _add_component(self, component: Component) -> None:
         name = component.__class__.__name__
         self.components[name] = component

@@ -10,12 +10,12 @@ TrainingSample = TypedDict(
     "TrainingSample",
     {
         "LP log": str,
-        "LP solution": Dict,
-        "LP value": float,
-        "Lower bound": float,
+        "LP solution": Optional[Dict],
+        "LP value": Optional[float],
+        "Lower bound": Optional[float],
         "MIP log": str,
-        "Solution": Dict,
-        "Upper bound": float,
+        "Solution": Optional[Dict],
+        "Upper bound": Optional[float],
         "slacks": Dict,
     },
     total=False,

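Because TrainingSample is declared with total=False and its values are now Optional, a sample may start empty and be filled in as solving progresses; a short sketch (the variable-name key in the solution dict is made up):

from miplearn.types import TrainingSample

sample: TrainingSample = {}             # no keys are required up front
sample["LP value"] = None               # allowed: value not yet known
sample["LP value"] = 3.14               # filled in after the LP relaxation
sample["Solution"] = {"x[0]": 1.0}      # hypothetical variable -> value map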