Objective: Rewrite sample_evaluate

master
Alinson S. Xavier 5 years ago
parent 7af22bd16b
commit 185b95118a
No known key found for this signature in database
GPG Key ID: DCA0DAD4D2F58624

@ -4,7 +4,12 @@
from typing import Dict from typing import Dict
def classifier_evaluation_dict(tp: int, tn: int, fp: int, fn: int) -> Dict: def classifier_evaluation_dict(
tp: int,
tn: int,
fp: int,
fn: int,
) -> Dict[str, float]:
p = tp + fn p = tp + fn
n = fp + tn n = fp + tn
d: Dict = { d: Dict = {

@ -3,7 +3,7 @@
# Released under the modified BSD license. See COPYING.md for more details. # Released under the modified BSD license. See COPYING.md for more details.
import numpy as np import numpy as np
from typing import Any, List, Union, TYPE_CHECKING, Tuple, Dict, Optional from typing import Any, List, Union, TYPE_CHECKING, Tuple, Dict, Optional, Hashable
from miplearn.extractors import InstanceIterator from miplearn.extractors import InstanceIterator
from miplearn.instance import Instance from miplearn.instance import Instance
@ -205,5 +205,9 @@ class Component:
ev += [self.sample_evaluate(instance.features, sample)] ev += [self.sample_evaluate(instance.features, sample)]
return ev return ev
def sample_evaluate(
    self,
    features: Features,
    sample: TrainingSample,
) -> Dict[Hashable, Dict[str, float]]:
    """Evaluate this component's predictions on a single training sample.

    Base implementation: components that do not support per-sample
    evaluation return an empty mapping. Subclasses override this to
    return, per target (e.g. a bound name or variable), a dictionary
    of named evaluation metrics.
    """
    return {}

@ -3,7 +3,7 @@
# Released under the modified BSD license. See COPYING.md for more details. # Released under the modified BSD license. See COPYING.md for more details.
import logging import logging
from typing import List, Dict, Union, Optional, Any, TYPE_CHECKING, Tuple from typing import List, Dict, Union, Optional, Any, TYPE_CHECKING, Tuple, Hashable
import numpy as np import numpy as np
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression
@ -149,3 +149,25 @@ class ObjectiveValueComponent(Component):
if "Upper bound" in sample and sample["Upper bound"] is not None: if "Upper bound" in sample and sample["Upper bound"] is not None:
y["Upper bound"] = [[sample["Upper bound"]]] y["Upper bound"] = [[sample["Upper bound"]]]
return x, y return x, y
def sample_evaluate(
    self,
    features: Features,
    sample: TrainingSample,
) -> Dict[Hashable, Dict[str, float]]:
    """Compare predicted objective bounds against the sample's actual bounds.

    For each bound ("Upper bound", "Lower bound") present and non-None in
    `sample`, returns a dictionary with the actual value, the predicted
    value, the absolute error (rounded to 8 decimals) and the relative
    error. Bounds missing from the sample are omitted from the result.
    """

    def compare(y_pred: float, y_actual: float) -> Dict[str, float]:
        # Cast to plain float: np.round returns np.float64, and the
        # declared return type is Dict[str, float].
        err = float(np.round(abs(y_pred - y_actual), 8))
        if y_actual != 0:
            # abs() keeps the relative error non-negative even when the
            # actual bound is negative.
            rel = err / abs(y_actual)
        else:
            # A zero actual value would otherwise yield a silent
            # inf/nan under numpy float division.
            rel = 0.0 if err == 0 else float("inf")
        return {
            "Actual value": y_actual,
            "Predicted value": y_pred,
            "Absolute error": err,
            "Relative error": rel,
        }

    result: Dict[Hashable, Dict[str, float]] = {}
    pred = self.sample_predict(features, sample)
    if "Upper bound" in sample and sample["Upper bound"] is not None:
        result["Upper bound"] = compare(pred["Upper bound"], sample["Upper bound"])
    if "Lower bound" in sample and sample["Lower bound"] is not None:
        result["Lower bound"] = compare(pred["Lower bound"], sample["Lower bound"])
    return result

@ -186,7 +186,7 @@ class PrimalSolutionComponent(Component):
self, self,
features: Features, features: Features,
sample: TrainingSample, sample: TrainingSample,
) -> Dict: ) -> Dict[Hashable, Dict[str, float]]:
solution_actual = sample["Solution"] solution_actual = sample["Solution"]
assert solution_actual is not None assert solution_actual is not None
solution_pred = self.sample_predict(features, sample) solution_pred = self.sample_predict(features, sample)

@ -14,96 +14,6 @@ from tests.fixtures.knapsack import get_knapsack_instance
import numpy as np import numpy as np
# def test_x_y_predict() -> None:
# # Construct instance
# instance = cast(Instance, Mock(spec=Instance))
# instance.get_instance_features = Mock( # type: ignore
# return_value=[1.0, 2.0],
# )
# instance.training_data = [
# {
# "Lower bound": 1.0,
# "Upper bound": 2.0,
# "LP value": 3.0,
# },
# {
# "Lower bound": 1.5,
# "Upper bound": 2.2,
# "LP value": 3.4,
# },
# ]
#
# # Construct mock regressors
# lb_regressor = Mock(spec=Regressor)
# lb_regressor.predict = Mock(return_value=np.array([[5.0], [6.0]]))
# lb_regressor.clone = lambda: lb_regressor
# ub_regressor = Mock(spec=Regressor)
# ub_regressor.predict = Mock(return_value=np.array([[3.0], [3.0]]))
# ub_regressor.clone = lambda: ub_regressor
# comp = ObjectiveValueComponent(
# lb_regressor=lb_regressor,
# ub_regressor=ub_regressor,
# )
#
# # Should build x correctly
# x_expected = np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.4]])
# assert_array_equal(comp.x([instance]), x_expected)
#
# # Should build y correctly
# y_actual = comp.y([instance])
# y_expected_lb = np.array([[1.0], [1.5]])
# y_expected_ub = np.array([[2.0], [2.2]])
# assert_array_equal(y_actual["Lower bound"], y_expected_lb)
# assert_array_equal(y_actual["Upper bound"], y_expected_ub)
#
# # Should pass arrays to regressors
# comp.fit([instance])
# assert_array_equal(lb_regressor.fit.call_args[0][0], x_expected)
# assert_array_equal(lb_regressor.fit.call_args[0][1], y_expected_lb)
# assert_array_equal(ub_regressor.fit.call_args[0][0], x_expected)
# assert_array_equal(ub_regressor.fit.call_args[0][1], y_expected_ub)
#
# # Should return predictions
# pred = comp.predict([instance])
# assert_array_equal(lb_regressor.predict.call_args[0][0], x_expected)
# assert_array_equal(ub_regressor.predict.call_args[0][0], x_expected)
# assert pred == {
# "Lower bound": [5.0, 6.0],
# "Upper bound": [3.0, 3.0],
# }
# def test_obj_evaluate():
# instances, models = get_test_pyomo_instances()
# reg = Mock(spec=Regressor)
# reg.predict = Mock(return_value=np.array([[1000.0], [1000.0]]))
# reg.clone = lambda: reg
# comp = ObjectiveValueComponent(
# lb_regressor=reg,
# ub_regressor=reg,
# )
# comp.fit(instances)
# ev = comp.evaluate(instances)
# assert ev == {
# "Lower bound": {
# "Explained variance": 0.0,
# "Max error": 183.0,
# "Mean absolute error": 126.5,
# "Mean squared error": 19194.5,
# "Median absolute error": 126.5,
# "R2": -5.012843605607331,
# },
# "Upper bound": {
# "Explained variance": 0.0,
# "Max error": 183.0,
# "Mean absolute error": 126.5,
# "Mean squared error": 19194.5,
# "Median absolute error": 126.5,
# "R2": -5.012843605607331,
# },
# }
@pytest.fixture @pytest.fixture
def features() -> Features: def features() -> Features:
return { return {
@ -274,6 +184,29 @@ def test_sample_predict_without_ub(
assert_array_equal(comp.lb_regressor.predict.call_args[0][0], x["Lower bound"]) assert_array_equal(comp.lb_regressor.predict.call_args[0][0], x["Lower bound"])
def test_sample_evaluate(features: Features, sample: TrainingSample) -> None:
    # Component whose regressors are stubbed to return fixed predictions:
    # 1.05 for the lower bound, 2.50 for the upper bound.
    component = ObjectiveValueComponent()
    component.lb_regressor = Mock(spec=Regressor)
    component.lb_regressor.predict = lambda _: np.array([[1.05]])
    component.ub_regressor = Mock(spec=Regressor)
    component.ub_regressor.predict = lambda _: np.array([[2.50]])

    # Sample fixture has actual bounds LB=1.0, UB=2.0.
    expected = {
        "Lower bound": {
            "Actual value": 1.0,
            "Predicted value": 1.05,
            "Absolute error": 0.05,
            "Relative error": 0.05,
        },
        "Upper bound": {
            "Actual value": 2.0,
            "Predicted value": 2.50,
            "Absolute error": 0.5,
            "Relative error": 0.25,
        },
    }
    assert component.sample_evaluate(features, sample) == expected
def test_usage() -> None: def test_usage() -> None:
solver = LearningSolver(components=[ObjectiveValueComponent()]) solver = LearningSolver(components=[ObjectiveValueComponent()])
instance = get_knapsack_instance(GurobiPyomoSolver()) instance = get_knapsack_instance(GurobiPyomoSolver())

Loading…
Cancel
Save