Convert TrainingSample to dataclass

Branch: master
Alinson S. Xavier, 5 years ago
parent aeed338837
commit b11779817a

@@ -93,7 +93,7 @@ class StaticLazyConstraintsComponent(Component):
         features: Features,
         training_data: TrainingSample,
     ) -> None:
-        training_data["LazyStatic: Enforced"] = self.enforced_cids
+        training_data.lazy_enforced = self.enforced_cids
         stats["LazyStatic: Restored"] = self.n_restored
         stats["LazyStatic: Iterations"] = self.n_iterations
@@ -188,8 +188,8 @@ class StaticLazyConstraintsComponent(Component):
                 x[category] = []
                 y[category] = []
             x[category] += [cfeatures.user_features]
-            if "LazyStatic: Enforced" in sample:
-                if cid in sample["LazyStatic: Enforced"]:
+            if sample.lazy_enforced is not None:
+                if cid in sample.lazy_enforced:
                     y[category] += [[False, True]]
                 else:
                     y[category] += [[True, False]]
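
The change above sets the pattern for the whole commit: key-membership tests
on the old TypedDict become None checks on dataclass attributes. A minimal
before/after sketch (illustrative, not part of the diff itself):

    # Before: optional TypedDict keys could be absent entirely.
    if "LazyStatic: Enforced" in sample:
        enforced = sample["LazyStatic: Enforced"]

    # After: the attribute always exists and defaults to None.
    if sample.lazy_enforced is not None:
        enforced = sample.lazy_enforced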

@@ -82,12 +82,14 @@ class ObjectiveValueComponent(Component):
         x: Dict[Hashable, List[List[float]]] = {}
         y: Dict[Hashable, List[List[float]]] = {}
         f = list(features.instance.user_features)
-        if "LP value" in sample and sample["LP value"] is not None:
-            f += [sample["LP value"]]
-        for c in ["Upper bound", "Lower bound"]:
-            x[c] = [f]
-            if c in sample and sample[c] is not None:  # type: ignore
-                y[c] = [[sample[c]]]  # type: ignore
+        if sample.lp_value is not None:
+            f += [sample.lp_value]
+        x["Upper bound"] = [f]
+        x["Lower bound"] = [f]
+        if sample.lower_bound is not None:
+            y["Lower bound"] = [[sample.lower_bound]]
+        if sample.upper_bound is not None:
+            y["Upper bound"] = [[sample.upper_bound]]
         return x, y

     def sample_evaluate(
@@ -106,7 +108,8 @@ class ObjectiveValueComponent(Component):
         result: Dict[Hashable, Dict[str, float]] = {}
         pred = self.sample_predict(features, sample)
-        for c in ["Upper bound", "Lower bound"]:
-            if c in sample and sample[c] is not None:  # type: ignore
-                result[c] = compare(pred[c], sample[c])  # type: ignore
+        if sample.upper_bound is not None:
+            result["Upper bound"] = compare(pred["Upper bound"], sample.upper_bound)
+        if sample.lower_bound is not None:
+            result["Lower bound"] = compare(pred["Lower bound"], sample.lower_bound)
         return result
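
Unrolling the old `for c in ["Upper bound", "Lower bound"]` loop costs a
little repetition but lets mypy check each bound through a plain attribute
access. A sketch of the resulting sample_xy output, using the fixture values
from the tests further down; the single user feature 5.0 and the exact call
shape are assumptions for illustration:

    sample = TrainingSample(lower_bound=1.0, upper_bound=2.0, lp_value=3.0)
    x, y = comp.sample_xy(features, sample)
    # lp_value is appended to the feature row only because it is not None:
    # x == {"Upper bound": [[5.0, 3.0]], "Lower bound": [[5.0, 3.0]]}
    # y == {"Upper bound": [[2.0]], "Lower bound": [[1.0]]}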

@@ -155,8 +155,8 @@ class PrimalSolutionComponent(Component):
         x: Dict = {}
         y: Dict = {}
         solution: Optional[Solution] = None
-        if "Solution" in sample and sample["Solution"] is not None:
-            solution = sample["Solution"]
+        if sample.solution is not None:
+            solution = sample.solution
         for (var_name, var_dict) in features.variables.items():
             for (idx, var_features) in var_dict.items():
                 category = var_features.category
@@ -168,8 +168,8 @@ class PrimalSolutionComponent(Component):
                 f: List[float] = []
                 assert var_features.user_features is not None
                 f += var_features.user_features
-                if "LP solution" in sample and sample["LP solution"] is not None:
-                    lp_value = sample["LP solution"][var_name][idx]
+                if sample.lp_solution is not None:
+                    lp_value = sample.lp_solution[var_name][idx]
                     if lp_value is not None:
                         f += [lp_value]
                 x[category] += [f]
@@ -190,7 +190,7 @@ class PrimalSolutionComponent(Component):
         features: Features,
         sample: TrainingSample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        solution_actual = sample["Solution"]
+        solution_actual = sample.solution
         assert solution_actual is not None
         solution_pred = self.sample_predict(features, sample)
         vars_all, vars_one, vars_zero = set(), set(), set()

@@ -95,8 +95,8 @@ class ConvertTightIneqsIntoEqsStep(Component):
         features,
         training_data,
     ):
-        if "slacks" not in training_data.keys():
-            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
+        if training_data.slacks is None:
+            training_data.slacks = solver.internal_solver.get_inequality_slacks()
         stats["ConvertTight: Restored"] = self.n_restored
         stats["ConvertTight: Inf iterations"] = self.n_infeasible_iterations
         stats["ConvertTight: Subopt iterations"] = self.n_suboptimal_iterations
@@ -120,7 +120,7 @@ class ConvertTightIneqsIntoEqsStep(Component):
             disable=len(instances) < 5,
         ):
             for training_data in instance.training_data:
-                cids = training_data["slacks"].keys()
+                cids = training_data.slacks.keys()
                 for cid in cids:
                     category = instance.get_constraint_category(cid)
                     if category is None:
@@ -142,7 +142,7 @@ class ConvertTightIneqsIntoEqsStep(Component):
             desc="Extract (rlx:conv_ineqs:y)",
             disable=len(instances) < 5,
         ):
-            for (cid, slack) in instance.training_data[0]["slacks"].items():
+            for (cid, slack) in instance.training_data[0].slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
                     continue

@@ -96,8 +96,8 @@ class DropRedundantInequalitiesStep(Component):
         features,
         training_data,
     ):
-        if "slacks" not in training_data.keys():
-            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
+        if training_data.slacks is None:
+            training_data.slacks = solver.internal_solver.get_inequality_slacks()
         stats["DropRedundant: Iterations"] = self.n_iterations
         stats["DropRedundant: Restored"] = self.n_restored
@@ -131,7 +131,7 @@ class DropRedundantInequalitiesStep(Component):
         x = {}
         y = {}
         for training_data in instance.training_data:
-            for (cid, slack) in training_data["slacks"].items():
+            for (cid, slack) in training_data.slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
                     continue
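
Both relaxation steps cache constraint slacks on the training sample. With
the dataclass, "compute once, reuse on later solves" becomes a None check; a
condensed sketch of the idiom shared by ConvertTightIneqsIntoEqsStep and
DropRedundantInequalitiesStep:

    # slacks defaults to None, so the first after_solve call fills the
    # cache and subsequent solves reuse the stored {cid: slack} dict.
    if training_data.slacks is None:
        training_data.slacks = solver.internal_solver.get_inequality_slacks()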

@@ -18,7 +18,7 @@ class Extractor(ABC):
     @staticmethod
     def split_variables(instance):
         result = {}
-        lp_solution = instance.training_data[0]["LP solution"]
+        lp_solution = instance.training_data[0].lp_solution
         for var_name in lp_solution:
             for index in lp_solution[var_name]:
                 category = instance.get_variable_category(var_name, index)
@@ -37,7 +37,7 @@ class InstanceFeaturesExtractor(Extractor):
             np.hstack(
                 [
                     instance.get_instance_features(),
-                    instance.training_data[0]["LP value"],
+                    instance.training_data[0].lp_value,
                 ]
             )
             for instance in instances
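
The extractors read the same fields through attributes. For example,
InstanceFeaturesExtractor concatenates each instance's feature vector with
training_data[0].lp_value; a rough sketch, where the instance features
[2.0, 7.5] are made up and the LP value is borrowed from the tests below:

    import numpy as np
    # np.hstack flattens the pair into one 1-D row per instance.
    row = np.hstack([[2.0, 7.5], 1287.923])   # -> [2.0, 7.5, 1287.923]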

@@ -2,13 +2,9 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.

-import gzip
 import logging
-import os
-import pickle
 import traceback
-import tempfile
-from typing import Optional, List, Any, IO, cast, BinaryIO, Union, Callable, Dict
+from typing import Optional, List, Any, cast, Callable, Dict

 from p_tqdm import p_map
@@ -22,7 +18,7 @@ from miplearn.instance import Instance, PickleGzInstance
 from miplearn.solvers import _RedirectOutput
 from miplearn.solvers.internal import InternalSolver
 from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
-from miplearn.types import TrainingSample, LearningSolveStats, MIPSolveStats
+from miplearn.types import TrainingSample, LearningSolveStats

 logger = logging.getLogger(__name__)
@@ -134,7 +130,7 @@ class LearningSolver:
             model = instance.to_model()

         # Initialize training sample
-        training_sample: TrainingSample = {}
+        training_sample = TrainingSample()
         instance.training_data += [training_sample]

         # Initialize stats
@@ -168,16 +164,13 @@ class LearningSolver:
             logger.info("Solving root LP relaxation...")
             lp_stats = self.internal_solver.solve_lp(tee=tee)
             stats.update(cast(LearningSolveStats, lp_stats))
-            training_sample["LP solution"] = self.internal_solver.get_solution()
-            training_sample["LP value"] = lp_stats["LP value"]
-            training_sample["LP log"] = lp_stats["LP log"]
+            training_sample.lp_solution = self.internal_solver.get_solution()
+            training_sample.lp_value = lp_stats["LP value"]
+            training_sample.lp_log = lp_stats["LP log"]
             logger.debug("Running after_solve_lp callbacks...")
             for component in self.components.values():
                 component.after_solve_lp(*callback_args)
-        else:
-            training_sample["LP solution"] = self.internal_solver.get_empty_solution()
-            training_sample["LP value"] = 0.0

         # Define wrappers
         def iteration_cb_wrapper() -> bool:
@@ -213,8 +206,8 @@ class LearningSolver:
             lazy_cb=lazy_cb,
         )
         stats.update(cast(LearningSolveStats, mip_stats))
-        if "LP value" in training_sample.keys():
-            stats["LP value"] = training_sample["LP value"]
+        if training_sample.lp_value is not None:
+            stats["LP value"] = training_sample.lp_value
         stats["Solver"] = "default"
         stats["Gap"] = self._compute_gap(
             ub=stats["Upper bound"],
@@ -223,10 +216,10 @@ class LearningSolver:
         stats["Mode"] = self.mode

         # Add some information to training_sample
-        training_sample["Lower bound"] = stats["Lower bound"]
-        training_sample["Upper bound"] = stats["Upper bound"]
-        training_sample["MIP log"] = stats["MIP log"]
-        training_sample["Solution"] = self.internal_solver.get_solution()
+        training_sample.lower_bound = stats["Lower bound"]
+        training_sample.upper_bound = stats["Upper bound"]
+        training_sample.mip_log = stats["MIP log"]
+        training_sample.solution = self.internal_solver.get_solution()

         # After-solve callbacks
         logger.debug("Calling after_solve_mip callbacks...")
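
Note the deleted else branch: when the root LP step is skipped, lp_solution
and lp_value now simply stay None instead of holding an empty solution and a
placeholder 0.0. A hedged sketch of the resulting lifecycle (instance setup
omitted):

    solver = LearningSolver()
    solver.solve(instance)              # appends one TrainingSample
    sample = instance.training_data[0]
    sample.lp_value                     # set after the root LP, else None
    sample.solution                     # set after the MIP solve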

@@ -11,22 +11,19 @@ VarIndex = Union[str, int, Tuple[Union[str, int]]]

 Solution = Dict[str, Dict[VarIndex, Optional[float]]]

-TrainingSample = TypedDict(
-    "TrainingSample",
-    {
-        "LP log": str,
-        "LP solution": Optional[Solution],
-        "LP value": Optional[float],
-        "LazyStatic: All": Set[str],
-        "LazyStatic: Enforced": Set[str],
-        "Lower bound": Optional[float],
-        "MIP log": str,
-        "Solution": Optional[Solution],
-        "Upper bound": Optional[float],
-        "slacks": Dict,
-    },
-    total=False,
-)
+@dataclass
+class TrainingSample:
+    lp_log: Optional[str] = None
+    lp_solution: Optional[Solution] = None
+    lp_value: Optional[float] = None
+    lazy_enforced: Optional[Set[str]] = None
+    lower_bound: Optional[float] = None
+    mip_log: Optional[str] = None
+    solution: Optional[Solution] = None
+    upper_bound: Optional[float] = None
+    slacks: Optional[Dict[str, float]] = None

 LPSolveStats = TypedDict(
     "LPSolveStats",

@@ -28,7 +28,7 @@ def test_convert_tight_usage():
     original_upper_bound = stats["Upper bound"]

     # Should collect training data
-    assert instance.training_data[0]["slacks"]["eq_capacity"] == 0.0
+    assert instance.training_data[0].slacks["eq_capacity"] == 0.0

     # Fit and resolve
     solver.fit([instance])

@@ -12,6 +12,7 @@ from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesStep
 from miplearn.instance import Instance
 from miplearn.solvers.internal import InternalSolver
 from miplearn.solvers.learning import LearningSolver
+from miplearn.types import TrainingSample, Features
 from tests.fixtures.infeasible import get_infeasible_instance
 from tests.fixtures.redundant import get_instance_with_redundancy
@@ -85,8 +86,8 @@ def test_drop_redundant():
         instance=instance,
         model=None,
         stats={},
-        features=None,
-        training_data=None,
+        features=Features(),
+        training_data=TrainingSample(),
     )

     # Should query list of constraints
@@ -129,13 +130,13 @@ def test_drop_redundant():
     )

     # LearningSolver calls after_solve
-    training_data = {}
+    training_data = TrainingSample()
     component.after_solve_mip(
         solver=solver,
         instance=instance,
         model=None,
         stats={},
-        features=None,
+        features=Features(),
         training_data=training_data,
     )
@@ -143,7 +144,7 @@ def test_drop_redundant():
     internal.get_inequality_slacks.assert_called_once()

     # Should store constraint slacks in instance object
-    assert training_data["slacks"] == {
+    assert training_data.slacks == {
         "c1": 0.5,
         "c2": 0.0,
         "c3": 0.0,
@@ -166,8 +167,8 @@ def test_drop_redundant_with_check_feasibility():
         instance=instance,
         model=None,
         stats={},
-        features=None,
-        training_data=None,
+        features=Features(),
+        training_data=TrainingSample(),
     )

     # Assert constraints are extracted
@@ -224,14 +225,14 @@ def test_x_y_fit_predict_evaluate():
     # First mock instance
     instances[0].training_data = [
-        {
-            "slacks": {
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c2": 0.05,
                 "c3": 0.00,
                 "c4": 30.0,
             }
-        }
+        )
     ]
     instances[0].get_constraint_category = Mock(
         side_effect=lambda cid: {
@@ -251,14 +252,14 @@ def test_x_y_fit_predict_evaluate():
     # Second mock instance
     instances[1].training_data = [
-        {
-            "slacks": {
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c3": 0.30,
                 "c4": 0.00,
                 "c5": 0.00,
             }
-        }
+        )
     ]
     instances[1].get_constraint_category = Mock(
         side_effect=lambda cid: {
@@ -343,22 +344,22 @@ def test_x_y_fit_predict_evaluate():
 def test_x_multiple_solves():
     instance = Mock(spec=Instance)
     instance.training_data = [
-        {
-            "slacks": {
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c2": 0.05,
                 "c3": 0.00,
                 "c4": 30.0,
             }
-        },
-        {
-            "slacks": {
+        ),
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c2": 0.00,
                 "c3": 1.00,
                 "c4": 0.0,
             }
-        },
+        ),
     ]
     instance.get_constraint_category = Mock(
         side_effect=lambda cid: {

@@ -23,9 +23,9 @@ from miplearn.types import (

 @pytest.fixture
 def sample() -> TrainingSample:
-    return {
-        "LazyStatic: Enforced": {"c1", "c2", "c4"},
-    }
+    return TrainingSample(
+        lazy_enforced={"c1", "c2", "c4"},
+    )


 @pytest.fixture
@@ -101,7 +101,7 @@ def test_usage_with_solver(features: Features) -> None:
         )
     )
-    sample: TrainingSample = {}
+    sample: TrainingSample = TrainingSample()
     stats: LearningSolveStats = {}

     # LearningSolver calls before_solve_mip
@@ -152,7 +152,7 @@ def test_usage_with_solver(features: Features) -> None:
     )

     # Should update training sample
-    assert sample["LazyStatic: Enforced"] == {"c1", "c2", "c3", "c4"}
+    assert sample.lazy_enforced == {"c1", "c2", "c3", "c4"}

     # Should update stats
     assert stats["LazyStatic: Removed"] == 1

@@ -26,27 +26,27 @@ def features() -> Features:

 @pytest.fixture
 def sample() -> TrainingSample:
-    return {
-        "Lower bound": 1.0,
-        "Upper bound": 2.0,
-        "LP value": 3.0,
-    }
+    return TrainingSample(
+        lower_bound=1.0,
+        upper_bound=2.0,
+        lp_value=3.0,
+    )


 @pytest.fixture
 def sample_without_lp() -> TrainingSample:
-    return {
-        "Lower bound": 1.0,
-        "Upper bound": 2.0,
-    }
+    return TrainingSample(
+        lower_bound=1.0,
+        upper_bound=2.0,
+    )


 @pytest.fixture
 def sample_without_ub() -> TrainingSample:
-    return {
-        "Lower bound": 1.0,
-        "LP value": 3.0,
-    }
+    return TrainingSample(
+        lower_bound=1.0,
+        lp_value=3.0,
+    )


 def test_sample_xy(

@@ -38,8 +38,8 @@ def test_xy() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "Solution": {
+    sample = TrainingSample(
+        solution={
             "x": {
                 0: 0.0,
                 1: 1.0,
@@ -47,7 +47,7 @@ def test_xy() -> None:
                 3: 0.0,
             }
         },
-        "LP solution": {
+        lp_solution={
             "x": {
                 0: 0.1,
                 1: 0.1,
@@ -55,7 +55,7 @@ def test_xy() -> None:
                 3: 0.1,
             }
         },
-    }
+    )
     x_expected = {
         "default": [
             [0.0, 0.0, 0.1],
@@ -99,8 +99,8 @@ def test_xy_without_lp_solution() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "Solution": {
+    sample = TrainingSample(
+        solution={
             "x": {
                 0: 0.0,
                 1: 1.0,
@@ -108,7 +108,7 @@ def test_xy_without_lp_solution() -> None:
                 3: 0.0,
             }
         },
-    }
+    )
     x_expected = {
         "default": [
             [0.0, 0.0],
@@ -161,15 +161,15 @@ def test_predict() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "LP solution": {
+    sample = TrainingSample(
+        lp_solution={
             "x": {
                 0: 0.1,
                 1: 0.5,
                 2: 0.9,
             }
         }
-    }
+    )
     x, _ = PrimalSolutionComponent.sample_xy(features, sample)
     comp = PrimalSolutionComponent()
     comp.classifiers = {"default": clf}
@@ -254,8 +254,8 @@ def test_evaluate() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "Solution": {
+    sample = TrainingSample(
+        solution={
             "x": {
                 0: 1.0,
                 1: 1.0,
@@ -264,7 +264,7 @@ def test_evaluate() -> None:
                 4: 1.0,
             }
         }
-    }
+    )
     ev = comp.sample_evaluate(features, sample)
     assert ev == {
         0: classifier_evaluation_dict(tp=1, fp=1, tn=3, fn=0),

@@ -39,7 +39,7 @@ def test_instance():
     instance = TravelingSalesmanInstance(n_cities, distances)
     solver = LearningSolver()
     stats = solver.solve(instance)
-    x = instance.training_data[0]["Solution"]["x"]
+    x = instance.training_data[0].solution["x"]
     assert x[0, 1] == 1.0
     assert x[0, 2] == 0.0
     assert x[0, 3] == 1.0
@@ -68,7 +68,7 @@ def test_subtour():
     solver.solve(instance)
     assert hasattr(instance, "found_violated_lazy_constraints")
     assert hasattr(instance, "found_violated_user_cuts")
-    x = instance.training_data[0]["Solution"]["x"]
+    x = instance.training_data[0].solution["x"]
     assert x[0, 1] == 1.0
     assert x[0, 4] == 1.0
     assert x[1, 2] == 1.0

@@ -27,22 +27,21 @@ def test_learning_solver():
     )
     solver.solve(instance)
     assert hasattr(instance, "features")

-    data = instance.training_data[0]
-    assert data["Solution"]["x"][0] == 1.0
-    assert data["Solution"]["x"][1] == 0.0
-    assert data["Solution"]["x"][2] == 1.0
-    assert data["Solution"]["x"][3] == 1.0
-    assert data["Lower bound"] == 1183.0
-    assert data["Upper bound"] == 1183.0
-    assert round(data["LP solution"]["x"][0], 3) == 1.000
-    assert round(data["LP solution"]["x"][1], 3) == 0.923
-    assert round(data["LP solution"]["x"][2], 3) == 1.000
-    assert round(data["LP solution"]["x"][3], 3) == 0.000
-    assert round(data["LP value"], 3) == 1287.923
-    assert len(data["MIP log"]) > 100
+    sample = instance.training_data[0]
+    assert sample.solution["x"][0] == 1.0
+    assert sample.solution["x"][1] == 0.0
+    assert sample.solution["x"][2] == 1.0
+    assert sample.solution["x"][3] == 1.0
+    assert sample.lower_bound == 1183.0
+    assert sample.upper_bound == 1183.0
+    assert round(sample.lp_solution["x"][0], 3) == 1.000
+    assert round(sample.lp_solution["x"][1], 3) == 0.923
+    assert round(sample.lp_solution["x"][2], 3) == 1.000
+    assert round(sample.lp_solution["x"][3], 3) == 0.000
+    assert round(sample.lp_value, 3) == 1287.923
+    assert len(sample.mip_log) > 100

     solver.fit([instance])
     solver.solve(instance)
@@ -73,7 +72,7 @@ def test_parallel_solve():
     assert len(results) == 10
     for instance in instances:
         data = instance.training_data[0]
-        assert len(data["Solution"]["x"].keys()) == 4
+        assert len(data.solution["x"].keys()) == 4


 def test_solve_fit_from_disk():
