Mirror of https://github.com/ANL-CEEESA/MIPLearn.git
Add type annotations to components
.github/workflows/test.yml (vendored)
@@ -5,7 +5,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: [3.7, 3.8, 3.9]

     steps:
       - name: Check out source code
@@ -4,6 +4,7 @@

 import logging
 from copy import deepcopy
+from typing import Any, Dict

 from sklearn.linear_model import LogisticRegression
 from sklearn.neighbors import KNeighborsClassifier
@@ -25,9 +26,9 @@ class AdaptiveClassifier(Classifier):

     def __init__(
         self,
-        candidates=None,
-        evaluator=ClassifierEvaluator(),
-    ):
+        candidates: Dict[str, Any] = None,
+        evaluator: ClassifierEvaluator = ClassifierEvaluator(),
+    ) -> None:
         """
         Initializes the meta-classifier.
         """
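Note: with the new annotation, `candidates: Dict[str, Any] = None` relies on mypy's implicit-Optional behaviour; under `--no-implicit-optional` (the default in recent mypy releases) a `None` default needs an explicit `Optional`. A minimal sketch of how that signature could be spelled — an editorial suggestion, not part of this commit:

    from typing import Any, Dict, Optional

    class AdaptiveClassifierSketch:
        # Explicit Optional lets the None default pass strict mypy checks.
        def __init__(
            self,
            candidates: Optional[Dict[str, Any]] = None,
        ) -> None:
            self.candidates = candidates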
@@ -15,7 +15,7 @@ class CountingClassifier(Classifier):
     counts how many times each label appeared, hence the name.
     """

-    def __init__(self):
+    def __init__(self) -> None:
         self.mean = None

     def fit(self, x_train, y_train):
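The CountingClassifier docstring above describes a model that simply remembers how often each label appeared. A self-contained sketch of that idea (illustrative only; the actual implementation is not part of this hunk):

    import numpy as np

    class FrequencyClassifierSketch:
        """Predicts P(y=1) as the fraction of positive labels seen during fit."""

        def __init__(self) -> None:
            self.mean = 0.0

        def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
            # Ignore the features entirely; remember only the label frequency.
            self.mean = float(np.mean(y_train))

        def predict_proba(self, x_test: np.ndarray) -> np.ndarray:
            # Every sample gets the same [P(y=0), P(y=1)] estimate.
            n = x_test.shape[0]
            return np.column_stack([np.full(n, 1.0 - self.mean), np.full(n, self.mean)])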
@@ -6,7 +6,7 @@ from sklearn.metrics import roc_auc_score


 class ClassifierEvaluator:
-    def __init__(self):
+    def __init__(self) -> None:
         pass

     def evaluate(self, clf, x_train, y_train):
@@ -7,10 +7,17 @@ from abc import abstractmethod, ABC
 import numpy as np
 from sklearn.metrics._ranking import _binary_clf_curve

+from miplearn.classifiers import Classifier
+

 class DynamicThreshold(ABC):
     @abstractmethod
-    def find(self, clf, x_train, y_train):
+    def find(
+        self,
+        clf: Classifier,
+        x_train: np.ndarray,
+        y_train: np.ndarray,
+    ) -> float:
         """
         Given a trained binary classifier `clf` and a training data set,
         returns the numerical threshold (float) satisfying some criterea.
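The annotated `find` interface receives a trained binary classifier plus training data and returns a float cut-off. A hypothetical subclass satisfying that interface, written against scikit-learn's public `precision_recall_curve` instead of the private `_binary_clf_curve` imported above (the committed `MinPrecisionThreshold.find` body is not shown in this hunk):

    import numpy as np
    from sklearn.metrics import precision_recall_curve

    class MinPrecisionThresholdSketch:
        """Illustration: smallest score threshold whose precision meets a target."""

        def __init__(self, min_precision: float) -> None:
            self.min_precision = min_precision

        def find(self, clf, x_train: np.ndarray, y_train: np.ndarray) -> float:
            proba = clf.predict_proba(x_train)[:, 1]
            precision, _, thresholds = precision_recall_curve(y_train, proba)
            # precision has one more entry than thresholds; drop the trailing 1.0.
            for p, t in zip(precision[:-1], thresholds):
                if p >= self.min_precision:
                    return float(t)
            return 1.0  # no threshold reaches the requested precision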
@@ -24,7 +31,7 @@ class MinPrecisionThreshold(DynamicThreshold):
     positive rate (also known as precision).
     """

-    def __init__(self, min_precision):
+    def __init__(self, min_precision: float) -> None:
         self.min_precision = min_precision

     def find(self, clf, x_train, y_train):
@@ -2,8 +2,16 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.

+from __future__ import annotations
+
 from abc import ABC, abstractmethod
+from typing import Any, List, Union, TYPE_CHECKING
+
+from miplearn.instance import Instance
+from miplearn.types import MIPSolveStats, TrainingSample
+
+if TYPE_CHECKING:
+    from miplearn.solvers.learning import LearningSolver


 class Component(ABC):
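The `from __future__ import annotations` plus `if TYPE_CHECKING:` combination lets `Component` annotate against `LearningSolver` without importing it at runtime, which would be circular (learning.py imports `Component`, as a later hunk shows). A generic sketch of the pattern with hypothetical module names:

    # a.py (hypothetical): annotate against a class from b.py without a runtime import
    from __future__ import annotations        # annotations are no longer evaluated eagerly
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:                          # only evaluated by type checkers
        from b import B                        # avoids the circular import at runtime

    class A:
        def use(self, other: B) -> None:       # resolved by mypy, ignored at runtime
            print(other)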
@@ -15,18 +23,35 @@ class Component(ABC):
     strategy.
     """

-    def before_solve(self, solver, instance, model):
+    def before_solve(
+        self,
+        solver: LearningSolver,
+        instance: Instance,
+        model: Any,
+    ) -> None:
+        """
+        Method called by LearningSolver before the problem is solved.
+
+        Parameters
+        ----------
+        solver
+            The solver calling this method.
+        instance
+            The instance being solved.
+        model
+            The concrete optimization model being solved.
+        """
         return

     @abstractmethod
     def after_solve(
         self,
-        solver,
-        instance,
-        model,
-        stats,
-        training_data,
-    ):
+        solver: LearningSolver,
+        instance: Instance,
+        model: Any,
+        stats: MIPSolveStats,
+        training_data: TrainingSample,
+    ) -> None:
         """
         Method called by LearningSolver after the problem is solved to optimality.

@@ -40,19 +65,23 @@ class Component(ABC):
             The concrete optimization model being solved.
         stats: dict
             A dictionary containing statistics about the solution process, such as
-            number of nodes explored and running time. Components are free to add their own
-            statistics here. For example, PrimalSolutionComponent adds statistics regarding
-            the number of predicted variables. All statistics in this dictionary are exported
-            to the benchmark CSV file.
+            number of nodes explored and running time. Components are free to add
+            their own statistics here. For example, PrimalSolutionComponent adds
+            statistics regarding the number of predicted variables. All statistics in
+            this dictionary are exported to the benchmark CSV file.
         training_data: dict
-            A dictionary containing data that may be useful for training machine learning
-            models and accelerating the solution process. Components are free to add their
-            own training data here. For example, PrimalSolutionComponent adds the current
-            primal solution. The data must be pickable.
+            A dictionary containing data that may be useful for training machine
+            learning models and accelerating the solution process. Components are
+            free to add their own training data here. For example,
+            PrimalSolutionComponent adds the current primal solution. The data must
+            be pickable.
         """
         pass

-    def fit(self, training_instances):
+    def fit(
+        self,
+        training_instances: Union[List[str], List[Instance]],
+    ) -> None:
         return

     def iteration_cb(self, solver, instance, model):
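For reference, a minimal custom component written against the annotated interface above. The keys written below are hypothetical, but the docstring in this hunk states that components are free to add their own entries to `stats` and `training_data`:

    import time

    from miplearn.components.component import Component


    class TimestampComponent(Component):
        """Illustrative component that records when each solve finished."""

        def before_solve(self, solver, instance, model) -> None:
            return  # nothing to do before the MIP is solved

        def after_solve(self, solver, instance, model, stats, training_data) -> None:
            # Hypothetical entries; stats is exported to the benchmark CSV file,
            # training_data must remain picklable.
            stats["Finished at"] = time.time()
            training_data["finished at"] = time.time()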
@@ -5,10 +5,12 @@
 import logging
 import sys
 from copy import deepcopy
+from typing import Any, Dict

 import numpy as np
 from tqdm.auto import tqdm

+from miplearn.classifiers import Classifier
 from miplearn.classifiers.counting import CountingClassifier
 from miplearn.components import classifier_evaluation_dict
 from miplearn.components.component import Component
@@ -24,15 +26,12 @@ class UserCutsComponent(Component):

     def __init__(
         self,
-        classifier=CountingClassifier(),
-        threshold=0.05,
+        classifier: Classifier = CountingClassifier(),
+        threshold: float = 0.05,
     ):
-        self.violations = set()
-        self.count = {}
-        self.n_samples = 0
-        self.threshold = threshold
-        self.classifier_prototype = classifier
-        self.classifiers = {}
+        self.threshold: float = threshold
+        self.classifier_prototype: Classifier = classifier
+        self.classifiers: Dict[Any, Classifier] = {}

     def before_solve(self, solver, instance, model):
         instance.found_violated_user_cuts = []
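The rewritten constructor keeps a single `classifier_prototype` plus an initially empty `classifiers` dictionary. A hedged sketch of the prototype pattern this suggests (consistent with the `deepcopy` import above, though the component's `fit` method is not part of this hunk):

    from copy import deepcopy
    from typing import Any, Dict

    from miplearn.classifiers import Classifier
    from miplearn.classifiers.counting import CountingClassifier

    classifier_prototype: Classifier = CountingClassifier()
    classifiers: Dict[Any, Classifier] = {}

    # One independent classifier per violation category, cloned from the prototype
    # so every category starts from the same unfitted configuration.
    for category in ["cut_family_a", "cut_family_b"]:  # hypothetical categories
        classifiers[category] = deepcopy(classifier_prototype)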
@@ -5,10 +5,12 @@
 import logging
 import sys
 from copy import deepcopy
+from typing import Any, Dict

 import numpy as np
 from tqdm.auto import tqdm

+from miplearn.classifiers import Classifier
 from miplearn.classifiers.counting import CountingClassifier
 from miplearn.components import classifier_evaluation_dict
 from miplearn.components.component import Component
@@ -24,15 +26,12 @@ class DynamicLazyConstraintsComponent(Component):

     def __init__(
         self,
-        classifier=CountingClassifier(),
-        threshold=0.05,
+        classifier: Classifier = CountingClassifier(),
+        threshold: float = 0.05,
     ):
-        self.violations = set()
-        self.count = {}
-        self.n_samples = 0
-        self.threshold = threshold
-        self.classifier_prototype = classifier
-        self.classifiers = {}
+        self.threshold: float = threshold
+        self.classifier_prototype: Classifier = classifier
+        self.classifiers: Dict[Any, Classifier] = {}

     def before_solve(self, solver, instance, model):
         instance.found_violated_lazy_constraints = []
@@ -15,6 +15,7 @@ from sklearn.metrics import (
     r2_score,
 )

+from miplearn.classifiers import Regressor
 from miplearn.components.component import Component
 from miplearn.extractors import InstanceFeaturesExtractor, ObjectiveValueExtractor

@@ -26,7 +27,10 @@ class ObjectiveValueComponent(Component):
     A Component which predicts the optimal objective value of the problem.
     """

-    def __init__(self, regressor=LinearRegression()):
+    def __init__(
+        self,
+        regressor: Regressor = LinearRegression(),
+    ) -> None:
         self.ub_regressor = None
         self.lb_regressor = None
         self.regressor_prototype = regressor
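With the annotated constructor, any object compatible with the `Regressor` interface can replace the default `LinearRegression`. A usage sketch (the alternative regressor chosen here is arbitrary, not something this commit prescribes):

    from sklearn.neighbors import KNeighborsRegressor

    from miplearn.components.objective import ObjectiveValueComponent

    # Swap the default LinearRegression for another scikit-learn regressor with
    # the same fit/predict interface.
    component = ObjectiveValueComponent(regressor=KNeighborsRegressor(n_neighbors=5))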
@@ -2,14 +2,19 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.

+import logging
 from copy import deepcopy
-import sys
+from typing import Union, Dict, Any

-from .component import Component
-from ..classifiers.adaptive import AdaptiveClassifier
-from ..classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
-from ..components import classifier_evaluation_dict
-from ..extractors import *
+import numpy as np
+from tqdm.auto import tqdm
+
+from miplearn.classifiers import Classifier
+from miplearn.classifiers.adaptive import AdaptiveClassifier
+from miplearn.classifiers.threshold import MinPrecisionThreshold, DynamicThreshold
+from miplearn.components import classifier_evaluation_dict
+from miplearn.components.component import Component
+from miplearn.extractors import VariableFeaturesExtractor, SolutionExtractor, Extractor

 logger = logging.getLogger(__name__)

@@ -21,13 +26,13 @@ class PrimalSolutionComponent(Component):

     def __init__(
         self,
-        classifier=AdaptiveClassifier(),
-        mode="exact",
-        threshold=MinPrecisionThreshold(0.98),
-    ):
+        classifier: Classifier = AdaptiveClassifier(),
+        mode: str = "exact",
+        threshold: Union[float, DynamicThreshold] = MinPrecisionThreshold(0.98),
+    ) -> None:
         self.mode = mode
-        self.classifiers = {}
-        self.thresholds = {}
+        self.classifiers: Dict[Any, Classifier] = {}
+        self.thresholds: Dict[Any, Union[float, DynamicThreshold]] = {}
         self.threshold_prototype = threshold
         self.classifier_prototype = classifier

@@ -8,10 +8,12 @@ import os
 import pickle
 import tempfile
 from copy import deepcopy
-from typing import Optional, List, Any, IO, cast, BinaryIO, Union
+from typing import Optional, List, Any, IO, cast, BinaryIO, Union, Callable, Dict

 from p_tqdm import p_map

+from miplearn.solvers.internal import InternalSolver
+from miplearn.components.component import Component
 from miplearn.components.cuts import UserCutsComponent
 from miplearn.components.lazy_dynamic import DynamicLazyConstraintsComponent
 from miplearn.components.objective import ObjectiveValueComponent
@@ -80,21 +82,21 @@ class LearningSolver:

     def __init__(
         self,
-        components=None,
-        mode="exact",
-        solver=None,
-        use_lazy_cb=False,
-        solve_lp_first=True,
-        simulate_perfect=False,
+        components: List[Component] = None,
+        mode: str = "exact",
+        solver: Callable[[], InternalSolver] = None,
+        use_lazy_cb: bool = False,
+        solve_lp_first: bool = True,
+        simulate_perfect: bool = False,
     ):
         if solver is None:
             solver = GurobiPyomoSolver
         assert callable(solver), f"Callable expected. Found {solver.__class__} instead."

-        self.components = {}
+        self.components: Dict[str, Component] = {}
         self.mode = mode
-        self.internal_solver = None
-        self.solver_factory = solver
+        self.internal_solver: Optional[InternalSolver] = None
+        self.solver_factory: Callable[[], InternalSolver] = solver
         self.use_lazy_cb = use_lazy_cb
         self.tee = False
         self.solve_lp_first = solve_lp_first
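`solver` is now typed as a zero-argument callable returning an `InternalSolver`, matching the existing `assert callable(solver)`; the factory is invoked once per solve (see the later hunk). A usage sketch; the `GurobiPyomoSolver` import path is assumed, since it is not shown in this diff:

    from miplearn.solvers.learning import LearningSolver
    from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver  # path assumed

    # A lambda (or the class itself, as in the default) satisfies
    # Callable[[], InternalSolver].
    solver = LearningSolver(
        solver=lambda: GurobiPyomoSolver(),
        mode="exact",
        solve_lp_first=True,
    )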
@@ -105,13 +107,11 @@ class LearningSolver:
                 self._add_component(comp)
         else:
             self._add_component(ObjectiveValueComponent())
-            self._add_component(PrimalSolutionComponent())
+            self._add_component(PrimalSolutionComponent(mode=mode))
             self._add_component(DynamicLazyConstraintsComponent())
             self._add_component(UserCutsComponent())

         assert self.mode in ["exact", "heuristic"]
-        for component in self.components.values():
-            component.mode = self.mode

     def solve(
         self,
@@ -216,18 +216,20 @@ class LearningSolver:
         # Initialize internal solver
         self.tee = tee
         self.internal_solver = self.solver_factory()
+        assert self.internal_solver is not None
+        assert isinstance(self.internal_solver, InternalSolver)
         self.internal_solver.set_instance(instance, model)

         # Solve linear relaxation
         if self.solve_lp_first:
             logger.info("Solving LP relaxation...")
-            stats = self.internal_solver.solve_lp(tee=tee)
+            lp_stats = self.internal_solver.solve_lp(tee=tee)
             training_sample["LP solution"] = self.internal_solver.get_solution()
-            training_sample["LP value"] = stats["Optimal value"]
-            training_sample["LP log"] = stats["Log"]
+            training_sample["LP value"] = lp_stats["Optimal value"]
+            training_sample["LP log"] = lp_stats["Log"]
         else:
             training_sample["LP solution"] = self.internal_solver.get_empty_solution()
-            training_sample["LP value"] = 0
+            training_sample["LP value"] = 0.0

         # Before-solve callbacks
         logger.debug("Running before_solve callbacks...")
@@ -347,7 +349,7 @@ class LearningSolver:
         for component in self.components.values():
             component.fit(training_instances)

-    def _add_component(self, component):
+    def _add_component(self, component: Component) -> None:
         name = component.__class__.__name__
         self.components[name] = component

@@ -10,12 +10,12 @@ TrainingSample = TypedDict(
     "TrainingSample",
     {
         "LP log": str,
-        "LP solution": Dict,
-        "LP value": float,
-        "Lower bound": float,
+        "LP solution": Optional[Dict],
+        "LP value": Optional[float],
+        "Lower bound": Optional[float],
         "MIP log": str,
-        "Solution": Dict,
-        "Upper bound": float,
+        "Solution": Optional[Dict],
+        "Upper bound": Optional[float],
         "slacks": Dict,
     },
     total=False,
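With `total=False`, every key of `TrainingSample` may be absent, and the `Optional[...]` values additionally allow an explicit `None`. A small sketch of what this permits, using `typing.TypedDict` (available from Python 3.8; the module's own import is not shown in this hunk) in the same functional form the module uses, which is needed because the keys contain spaces:

    from typing import Dict, Optional, TypedDict

    SampleSketch = TypedDict(
        "SampleSketch",
        {
            "LP value": Optional[float],
            "Lower bound": Optional[float],
            "Solution": Optional[Dict],
        },
        total=False,
    )

    sample: SampleSketch = {}          # total=False: every key may be omitted
    sample["LP value"] = 3.14          # key present with a concrete value
    sample["Lower bound"] = None       # Optional[...] also allows an explicit None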