Mirror of https://github.com/ANL-CEEESA/MIPLearn.git, synced 2025-12-06 09:28:51 -06:00
Refactor ObjectiveComponent
@@ -3,7 +3,7 @@
 # Released under the modified BSD license. See COPYING.md for more details.
 
 import logging
-from copy import deepcopy
+from typing import List, Dict, Union, Callable, Optional, Any, TYPE_CHECKING
 
 import numpy as np
 from sklearn.linear_model import LinearRegression
@@ -17,7 +17,12 @@ from sklearn.metrics import (
 
 from miplearn.classifiers import Regressor
 from miplearn.components.component import Component
-from miplearn.extractors import InstanceFeaturesExtractor, ObjectiveValueExtractor
+from miplearn.extractors import InstanceIterator
+from miplearn.instance import Instance
+from miplearn.types import MIPSolveStats, TrainingSample, LearningSolveStats
+
+if TYPE_CHECKING:
+    from miplearn.solvers.learning import LearningSolver
 
 logger = logging.getLogger(__name__)
 
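The new TYPE_CHECKING guard is worth a note: it makes the LearningSolver import visible to type checkers only, the usual pattern for annotating against a class whose module cannot be imported at runtime (typically to avoid a circular import), with the annotation then written as the string "LearningSolver". A minimal sketch of the pattern with a placeholder component class; the circular-import motivation is an inference, not stated in the commit:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported for annotations only; this line never runs at runtime.
    from miplearn.solvers.learning import LearningSolver


class ExampleComponent:
    # The quoted name is resolved lazily by the type checker.
    def before_solve(self, solver: "LearningSolver") -> None:
        pass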
@@ -29,58 +34,102 @@ class ObjectiveValueComponent(Component):
 
     def __init__(
         self,
-        regressor: Regressor = LinearRegression(),
+        lb_regressor: Callable[[], Regressor] = LinearRegression,
+        ub_regressor: Callable[[], Regressor] = LinearRegression,
     ) -> None:
-        self.ub_regressor = None
-        self.lb_regressor = None
-        self.regressor_prototype = regressor
+        self.ub_regressor: Optional[Regressor] = None
+        self.lb_regressor: Optional[Regressor] = None
+        self.lb_regressor_factory = lb_regressor
+        self.ub_regressor_factory = ub_regressor
+        self._predicted_ub: Optional[float] = None
+        self._predicted_lb: Optional[float] = None
 
-    def before_solve(self, solver, instance, model):
+    def before_solve(
+        self,
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+    ) -> None:
         if self.ub_regressor is not None:
             logger.info("Predicting optimal value...")
-            lb, ub = self.predict([instance])[0]
-            instance.predicted_ub = ub
-            instance.predicted_lb = lb
-            logger.info("Predicted values: lb=%.2f, ub=%.2f" % (lb, ub))
+            pred = self.predict([instance])
+            self._predicted_lb = pred["Lower bound"][0]
+            self._predicted_ub = pred["Upper bound"][0]
+            logger.info(
+                "Predicted values: lb=%.2f, ub=%.2f"
+                % (
+                    self._predicted_lb,
+                    self._predicted_ub,
+                )
+            )
 
     def after_solve(
         self,
-        solver,
-        instance,
-        model,
-        stats,
-        training_data,
-    ):
-        if self.ub_regressor is not None:
-            stats["Predicted UB"] = instance.predicted_ub
-            stats["Predicted LB"] = instance.predicted_lb
-        else:
-            stats["Predicted UB"] = None
-            stats["Predicted LB"] = None
+        solver: "LearningSolver",
+        instance: Instance,
+        model: Any,
+        stats: LearningSolveStats,
+        training_data: TrainingSample,
+    ) -> None:
+        if self._predicted_ub is not None:
+            stats["Objective: predicted UB"] = self._predicted_ub
+        if self._predicted_lb is not None:
+            stats["Objective: predicted LB"] = self._predicted_lb
 
-    def fit(self, training_instances):
+    def fit(self, training_instances: Union[List[str], List[Instance]]) -> None:
+        self.lb_regressor = self.lb_regressor_factory()
+        self.ub_regressor = self.ub_regressor_factory()
         logger.debug("Extracting features...")
-        features = InstanceFeaturesExtractor().extract(training_instances)
-        ub = ObjectiveValueExtractor(kind="upper bound").extract(training_instances)
-        lb = ObjectiveValueExtractor(kind="lower bound").extract(training_instances)
-        assert ub.shape == (len(training_instances), 1)
-        assert lb.shape == (len(training_instances), 1)
-        self.ub_regressor = deepcopy(self.regressor_prototype)
-        self.lb_regressor = deepcopy(self.regressor_prototype)
+        x_train = self.x(training_instances)
+        y_train = self.y(training_instances)
+        logger.debug("Fitting lb_regressor...")
+        self.lb_regressor.fit(x_train, y_train["Lower bound"])
         logger.debug("Fitting ub_regressor...")
-        self.ub_regressor.fit(features, ub.ravel())
-        logger.debug("Fitting ub_regressor...")
-        self.lb_regressor.fit(features, lb.ravel())
+        self.ub_regressor.fit(x_train, y_train["Upper bound"])
 
-    def predict(self, instances):
-        features = InstanceFeaturesExtractor().extract(instances)
-        lb = self.lb_regressor.predict(features)
-        ub = self.ub_regressor.predict(features)
-        assert lb.shape == (len(instances),)
-        assert ub.shape == (len(instances),)
-        return np.array([lb, ub]).T
+    def predict(
+        self,
+        instances: Union[List[str], List[Instance]],
+    ) -> Dict[str, List[float]]:
+        assert self.lb_regressor is not None
+        assert self.ub_regressor is not None
+        x_test = self.x(instances)
+        (n_samples, n_features) = x_test.shape
+        lb = self.lb_regressor.predict(x_test)
+        ub = self.ub_regressor.predict(x_test)
+        assert lb.shape == (n_samples, 1)
+        assert ub.shape == (n_samples, 1)
+        return {
+            "Lower bound": lb.ravel().tolist(),
+            "Upper bound": ub.ravel().tolist(),
+        }
 
-    def evaluate(self, instances):
+    @staticmethod
+    def x(instances: Union[List[str], List[Instance]]) -> np.ndarray:
+        result = []
+        for instance in InstanceIterator(instances):
+            for _ in instance.training_data:
+                instance_features = instance.get_instance_features()
+                result.append(instance_features)
+        return np.array(result)
+
+    @staticmethod
+    def y(instances: Union[List[str], List[Instance]]) -> Dict[str, np.ndarray]:
+        ub: List[List[float]] = []
+        lb: List[List[float]] = []
+        for instance in InstanceIterator(instances):
+            for sample in instance.training_data:
+                lb.append([sample["Lower bound"]])
+                ub.append([sample["Upper bound"]])
+        return {
+            "Lower bound": np.array(lb),
+            "Upper bound": np.array(ub),
+        }
+
+    def evaluate(
+        self,
+        instances: Union[List[str], List[Instance]],
+    ) -> Dict[str, Dict[str, float]]:
         y_pred = self.predict(instances)
         y_true = np.array(
             [
@@ -88,11 +137,12 @@ class ObjectiveValueComponent(Component):
                     inst.training_data[0]["Lower bound"],
                     inst.training_data[0]["Upper bound"],
                 ]
-                for inst in instances
+                for inst in InstanceIterator(instances)
             ]
         )
-        y_true_lb, y_true_ub = y_true[:, 0], y_true[:, 1]
-        y_pred_lb, y_pred_ub = y_pred[:, 1], y_pred[:, 1]
+        y_pred_lb = y_pred["Lower bound"]
+        y_pred_ub = y_pred["Upper bound"]
+        y_true_lb, y_true_ub = y_true[:, 0], y_true[:, 1]
         ev = {
             "Lower bound": {
                 "Mean squared error": mean_squared_error(y_true_lb, y_pred_lb),
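Taken together, these objective-component hunks replace the single deepcopy'd regressor prototype with one factory per bound, move feature and label extraction into the static x/y helpers, and change predict to return a dict keyed by bound name instead of an (n, 2) array. A minimal usage sketch of the refactored interface; KNeighborsRegressor is an arbitrary stand-in, and train_instances/test_instances are assumed to be lists of previously solved MIPLearn instances:

from sklearn.neighbors import KNeighborsRegressor

from miplearn.components.objective import ObjectiveValueComponent

# Factories (not instances) are passed so that fit() can build fresh,
# independent regressors for the lower and upper bound.
comp = ObjectiveValueComponent(
    lb_regressor=lambda: KNeighborsRegressor(n_neighbors=3),
    ub_regressor=lambda: KNeighborsRegressor(n_neighbors=3),
)
comp.fit(train_instances)
pred = comp.predict(test_instances)
print(pred["Lower bound"])  # one float per training sample
print(pred["Upper bound"])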
@@ -117,9 +117,6 @@ class MaxWeightStableSetInstance(Instance):
             model.clique_eqs.add(sum(model.x[i] for i in clique) <= 1)
         return model
 
-    def get_instance_features(self):
-        return np.ones(0)
-
     def get_variable_features(self, var, index):
         neighbor_weights = [0] * 15
         neighbor_degrees = [100] * 15
@@ -65,6 +65,8 @@ LearningSolveStats = TypedDict(
         "Primal: free": int,
         "Primal: zero": int,
         "Primal: one": int,
+        "Objective: predicted LB": float,
+        "Objective: predicted UB": float,
     },
     total=False,
 )
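Because LearningSolveStats is declared with total=False, every key, including the two added here, is optional, and after_solve above now sets them only when a prediction was actually made; the old code instead wrote explicit None values. A short sketch of what this means for callers:

from miplearn.types import LearningSolveStats

stats: LearningSolveStats = {}  # all keys are optional under total=False
stats["Objective: predicted LB"] = 1070.0

# Consumers must test for presence rather than assume the key exists:
lb = stats.get("Objective: predicted LB")
if lb is not None:
    print("predicted lower bound: %.2f" % lb)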
@@ -1,33 +1,81 @@
 # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.
+from typing import cast
 from unittest.mock import Mock
 
 import numpy as np
+from numpy.testing import assert_array_equal
 
+from miplearn.instance import Instance
 from miplearn.classifiers import Regressor
 from miplearn.components.objective import ObjectiveValueComponent
 from .. import get_test_pyomo_instances
 
 
-def test_usage():
-    instances, models = get_test_pyomo_instances()
-    comp = ObjectiveValueComponent()
-    comp.fit(instances)
-    assert instances[0].training_data[0]["Lower bound"] == 1183.0
-    assert instances[0].training_data[0]["Upper bound"] == 1183.0
-    assert np.round(comp.predict(instances), 2).tolist() == [
-        [1183.0, 1183.0],
-        [1070.0, 1070.0],
-    ]
+def test_x_y_predict() -> None:
+    # Construct instance
+    instance = cast(Instance, Mock(spec=Instance))
+    instance.get_instance_features = Mock(  # type: ignore
+        return_value=[1.0, 2.0],
+    )
+    instance.training_data = [
+        {
+            "Lower bound": 1.0,
+            "Upper bound": 2.0,
+        },
+        {
+            "Lower bound": 1.5,
+            "Upper bound": 2.2,
+        },
+    ]
+
+    # Construct mock regressors
+    lb_regressor = Mock(spec=Regressor)
+    lb_regressor.predict = Mock(return_value=np.array([[5.0], [6.0]]))
+    ub_regressor = Mock(spec=Regressor)
+    ub_regressor.predict = Mock(return_value=np.array([[3.0], [3.0]]))
+    comp = ObjectiveValueComponent(
+        lb_regressor=lambda: lb_regressor,
+        ub_regressor=lambda: ub_regressor,
+    )
+
+    # Should build x correctly
+    x_expected = np.array([[1.0, 2.0], [1.0, 2.0]])
+    assert_array_equal(comp.x([instance]), x_expected)
+
+    # Should build y correctly
+    y_actual = comp.y([instance])
+    y_expected_lb = np.array([[1.0], [1.5]])
+    y_expected_ub = np.array([[2.0], [2.2]])
+    assert_array_equal(y_actual["Lower bound"], y_expected_lb)
+    assert_array_equal(y_actual["Upper bound"], y_expected_ub)
+
+    # Should pass arrays to regressors
+    comp.fit([instance])
+    assert_array_equal(lb_regressor.fit.call_args[0][0], x_expected)
+    assert_array_equal(lb_regressor.fit.call_args[0][1], y_expected_lb)
+    assert_array_equal(ub_regressor.fit.call_args[0][0], x_expected)
+    assert_array_equal(ub_regressor.fit.call_args[0][1], y_expected_ub)
+
+    # Should return predictions
+    pred = comp.predict([instance])
+    assert_array_equal(lb_regressor.predict.call_args[0][0], x_expected)
+    assert_array_equal(ub_regressor.predict.call_args[0][0], x_expected)
+    assert pred == {
+        "Lower bound": [5.0, 6.0],
+        "Upper bound": [3.0, 3.0],
+    }
 
 
 def test_obj_evaluate():
     instances, models = get_test_pyomo_instances()
     reg = Mock(spec=Regressor)
-    reg.predict = Mock(return_value=np.array([1000.0, 1000.0]))
-    comp = ObjectiveValueComponent(regressor=reg)
+    reg.predict = Mock(return_value=np.array([[1000.0], [1000.0]]))
+    comp = ObjectiveValueComponent(
+        lb_regressor=lambda: reg,
+        ub_regressor=lambda: reg,
+    )
     comp.fit(instances)
     ev = comp.evaluate(instances)
     assert ev == {
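The rewritten test isolates the component from any real solver by spec'ing Mocks against the Instance and Regressor interfaces: spec= makes the mock reject attributes the real class lacks, cast() keeps mypy satisfied, and call_args lets the test assert exactly which arrays reached fit and predict. A condensed, self-contained sketch of the same pattern with a toy class (Greeter is illustrative, not from MIPLearn):

from unittest.mock import Mock


class Greeter:
    def greet(self, name: str) -> str:
        return "hello " + name


g = Mock(spec=Greeter)             # mock exposes only Greeter's API
g.greet = Mock(return_value="hi")  # stub the method under test
assert g.greet("world") == "hi"
assert g.greet.call_args[0][0] == "world"  # inspect recorded arguments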
@@ -130,7 +130,7 @@ def test_simulate_perfect():
         simulate_perfect=True,
     )
     stats = solver.solve(tmp.name)
-    assert stats["Lower bound"] == stats["Predicted LB"]
+    assert stats["Lower bound"] == stats["Objective: predicted LB"]
 
 
 def test_gap():