Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)
Convert TrainingSample to dataclass
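This commit replaces the TrainingSample TypedDict in miplearn/types.py with a dataclass. Training data is now read and written through attributes (sample.lp_value) instead of string keys (sample["LP value"]), and key-membership tests become "is not None" checks, since every field always exists and defaults to None. A minimal sketch of the access-pattern change (illustrative only; the real class, shown in the diff below, carries more fields):

# Illustrative sketch of the TypedDict -> dataclass change (not part of the diff).
from dataclasses import dataclass
from typing import Optional

@dataclass
class TrainingSample:
    lp_value: Optional[float] = None
    lower_bound: Optional[float] = None

# Before: keys could be absent entirely, so callers wrote
#     if "LP value" in sample and sample["LP value"] is not None: ...
# After: every field exists and defaults to None.
sample = TrainingSample(lp_value=3.0)
if sample.lp_value is not None:
    print(sample.lp_value)         # 3.0
assert sample.lower_bound is None  # unset fields are None, not missing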
@@ -93,7 +93,7 @@ class StaticLazyConstraintsComponent(Component):
         features: Features,
         training_data: TrainingSample,
     ) -> None:
-        training_data["LazyStatic: Enforced"] = self.enforced_cids
+        training_data.lazy_enforced = self.enforced_cids
         stats["LazyStatic: Restored"] = self.n_restored
         stats["LazyStatic: Iterations"] = self.n_iterations

@@ -188,8 +188,8 @@ class StaticLazyConstraintsComponent(Component):
                x[category] = []
                y[category] = []
            x[category] += [cfeatures.user_features]
-           if "LazyStatic: Enforced" in sample:
-               if cid in sample["LazyStatic: Enforced"]:
+           if sample.lazy_enforced is not None:
+               if cid in sample.lazy_enforced:
                    y[category] += [[False, True]]
                else:
                    y[category] += [[True, False]]
@@ -82,12 +82,14 @@ class ObjectiveValueComponent(Component):
         x: Dict[Hashable, List[List[float]]] = {}
         y: Dict[Hashable, List[List[float]]] = {}
         f = list(features.instance.user_features)
-        if "LP value" in sample and sample["LP value"] is not None:
-            f += [sample["LP value"]]
-        for c in ["Upper bound", "Lower bound"]:
-            x[c] = [f]
-            if c in sample and sample[c] is not None:  # type: ignore
-                y[c] = [[sample[c]]]  # type: ignore
+        if sample.lp_value is not None:
+            f += [sample.lp_value]
+        x["Upper bound"] = [f]
+        x["Lower bound"] = [f]
+        if sample.lower_bound is not None:
+            y["Lower bound"] = [[sample.lower_bound]]
+        if sample.upper_bound is not None:
+            y["Upper bound"] = [[sample.upper_bound]]
         return x, y

     def sample_evaluate(

@@ -106,7 +108,8 @@ class ObjectiveValueComponent(Component):

         result: Dict[Hashable, Dict[str, float]] = {}
         pred = self.sample_predict(features, sample)
-        for c in ["Upper bound", "Lower bound"]:
-            if c in sample and sample[c] is not None:  # type: ignore
-                result[c] = compare(pred[c], sample[c])  # type: ignore
+        if sample.upper_bound is not None:
+            result["Upper bound"] = compare(pred["Upper bound"], sample.upper_bound)
+        if sample.lower_bound is not None:
+            result["Lower bound"] = compare(pred["Lower bound"], sample.lower_bound)
         return result
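For reference, the rewritten sample_xy always emits one feature row per bound and only emits labels for bounds that are actually present in the sample. A self-contained sketch of the new branch, detached from the component class (simplified; this TrainingSample stands in for the real dataclass):

# Standalone sketch of the new sample_xy logic (simplified, for illustration).
from dataclasses import dataclass
from typing import Dict, Hashable, List, Optional, Tuple

@dataclass
class TrainingSample:
    lp_value: Optional[float] = None
    lower_bound: Optional[float] = None
    upper_bound: Optional[float] = None

def sample_xy(
    user_features: List[float],
    sample: TrainingSample,
) -> Tuple[Dict[Hashable, List[List[float]]], Dict[Hashable, List[List[float]]]]:
    x: Dict[Hashable, List[List[float]]] = {}
    y: Dict[Hashable, List[List[float]]] = {}
    f = list(user_features)
    if sample.lp_value is not None:
        f += [sample.lp_value]
    x["Upper bound"] = [f]
    x["Lower bound"] = [f]
    if sample.lower_bound is not None:
        y["Lower bound"] = [[sample.lower_bound]]
    if sample.upper_bound is not None:
        y["Upper bound"] = [[sample.upper_bound]]
    return x, y

x, y = sample_xy([1.0, 2.0], TrainingSample(lp_value=3.0, lower_bound=4.0))
assert x["Upper bound"] == [[1.0, 2.0, 3.0]]
assert y == {"Lower bound": [[4.0]]}  # no label for the missing upper bound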
@@ -155,8 +155,8 @@ class PrimalSolutionComponent(Component):
         x: Dict = {}
         y: Dict = {}
         solution: Optional[Solution] = None
-        if "Solution" in sample and sample["Solution"] is not None:
-            solution = sample["Solution"]
+        if sample.solution is not None:
+            solution = sample.solution
         for (var_name, var_dict) in features.variables.items():
             for (idx, var_features) in var_dict.items():
                 category = var_features.category

@@ -168,8 +168,8 @@ class PrimalSolutionComponent(Component):
                 f: List[float] = []
                 assert var_features.user_features is not None
                 f += var_features.user_features
-                if "LP solution" in sample and sample["LP solution"] is not None:
-                    lp_value = sample["LP solution"][var_name][idx]
+                if sample.lp_solution is not None:
+                    lp_value = sample.lp_solution[var_name][idx]
                 if lp_value is not None:
                     f += [lp_value]
                 x[category] += [f]

@@ -190,7 +190,7 @@ class PrimalSolutionComponent(Component):
         features: Features,
         sample: TrainingSample,
     ) -> Dict[Hashable, Dict[str, float]]:
-        solution_actual = sample["Solution"]
+        solution_actual = sample.solution
         assert solution_actual is not None
         solution_pred = self.sample_predict(features, sample)
         vars_all, vars_one, vars_zero = set(), set(), set()
@@ -95,8 +95,8 @@ class ConvertTightIneqsIntoEqsStep(Component):
         features,
         training_data,
     ):
-        if "slacks" not in training_data.keys():
-            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
+        if training_data.slacks is None:
+            training_data.slacks = solver.internal_solver.get_inequality_slacks()
         stats["ConvertTight: Restored"] = self.n_restored
         stats["ConvertTight: Inf iterations"] = self.n_infeasible_iterations
         stats["ConvertTight: Subopt iterations"] = self.n_suboptimal_iterations

@@ -120,7 +120,7 @@ class ConvertTightIneqsIntoEqsStep(Component):
             disable=len(instances) < 5,
         ):
             for training_data in instance.training_data:
-                cids = training_data["slacks"].keys()
+                cids = training_data.slacks.keys()
                 for cid in cids:
                     category = instance.get_constraint_category(cid)
                     if category is None:

@@ -142,7 +142,7 @@ class ConvertTightIneqsIntoEqsStep(Component):
             desc="Extract (rlx:conv_ineqs:y)",
             disable=len(instances) < 5,
         ):
-            for (cid, slack) in instance.training_data[0]["slacks"].items():
+            for (cid, slack) in instance.training_data[0].slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
                     continue
@@ -96,8 +96,8 @@ class DropRedundantInequalitiesStep(Component):
         features,
         training_data,
     ):
-        if "slacks" not in training_data.keys():
-            training_data["slacks"] = solver.internal_solver.get_inequality_slacks()
+        if training_data.slacks is None:
+            training_data.slacks = solver.internal_solver.get_inequality_slacks()
         stats["DropRedundant: Iterations"] = self.n_iterations
         stats["DropRedundant: Restored"] = self.n_restored

@@ -131,7 +131,7 @@ class DropRedundantInequalitiesStep(Component):
         x = {}
         y = {}
         for training_data in instance.training_data:
-            for (cid, slack) in training_data["slacks"].items():
+            for (cid, slack) in training_data.slacks.items():
                 category = instance.get_constraint_category(cid)
                 if category is None:
                     continue
@@ -18,7 +18,7 @@ class Extractor(ABC):
     @staticmethod
     def split_variables(instance):
         result = {}
-        lp_solution = instance.training_data[0]["LP solution"]
+        lp_solution = instance.training_data[0].lp_solution
         for var_name in lp_solution:
             for index in lp_solution[var_name]:
                 category = instance.get_variable_category(var_name, index)

@@ -37,7 +37,7 @@ class InstanceFeaturesExtractor(Extractor):
                 np.hstack(
                     [
                         instance.get_instance_features(),
-                        instance.training_data[0]["LP value"],
+                        instance.training_data[0].lp_value,
                     ]
                 )
                 for instance in instances
@@ -2,13 +2,9 @@
 # Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
 # Released under the modified BSD license. See COPYING.md for more details.

-import gzip
 import logging
-import os
 import pickle
-import traceback
-import tempfile
-from typing import Optional, List, Any, IO, cast, BinaryIO, Union, Callable, Dict
+from typing import Optional, List, Any, cast, Callable, Dict

 from p_tqdm import p_map

@@ -22,7 +18,7 @@ from miplearn.instance import Instance, PickleGzInstance
 from miplearn.solvers import _RedirectOutput
 from miplearn.solvers.internal import InternalSolver
 from miplearn.solvers.pyomo.gurobi import GurobiPyomoSolver
-from miplearn.types import TrainingSample, LearningSolveStats, MIPSolveStats
+from miplearn.types import TrainingSample, LearningSolveStats

 logger = logging.getLogger(__name__)
@@ -134,7 +130,7 @@ class LearningSolver:
         model = instance.to_model()

         # Initialize training sample
-        training_sample: TrainingSample = {}
+        training_sample = TrainingSample()
         instance.training_data += [training_sample]

         # Initialize stats

@@ -168,16 +164,13 @@ class LearningSolver:
             logger.info("Solving root LP relaxation...")
             lp_stats = self.internal_solver.solve_lp(tee=tee)
             stats.update(cast(LearningSolveStats, lp_stats))
-            training_sample["LP solution"] = self.internal_solver.get_solution()
-            training_sample["LP value"] = lp_stats["LP value"]
-            training_sample["LP log"] = lp_stats["LP log"]
+            training_sample.lp_solution = self.internal_solver.get_solution()
+            training_sample.lp_value = lp_stats["LP value"]
+            training_sample.lp_log = lp_stats["LP log"]

             logger.debug("Running after_solve_lp callbacks...")
             for component in self.components.values():
                 component.after_solve_lp(*callback_args)
-        else:
-            training_sample["LP solution"] = self.internal_solver.get_empty_solution()
-            training_sample["LP value"] = 0.0

         # Define wrappers
         def iteration_cb_wrapper() -> bool:
@@ -213,8 +206,8 @@ class LearningSolver:
             lazy_cb=lazy_cb,
         )
         stats.update(cast(LearningSolveStats, mip_stats))
-        if "LP value" in training_sample.keys():
-            stats["LP value"] = training_sample["LP value"]
+        if training_sample.lp_value is not None:
+            stats["LP value"] = training_sample.lp_value
         stats["Solver"] = "default"
         stats["Gap"] = self._compute_gap(
             ub=stats["Upper bound"],

@@ -223,10 +216,10 @@ class LearningSolver:
         stats["Mode"] = self.mode

         # Add some information to training_sample
-        training_sample["Lower bound"] = stats["Lower bound"]
-        training_sample["Upper bound"] = stats["Upper bound"]
-        training_sample["MIP log"] = stats["MIP log"]
-        training_sample["Solution"] = self.internal_solver.get_solution()
+        training_sample.lower_bound = stats["Lower bound"]
+        training_sample.upper_bound = stats["Upper bound"]
+        training_sample.mip_log = stats["MIP log"]
+        training_sample.solution = self.internal_solver.get_solution()

         # After-solve callbacks
         logger.debug("Calling after_solve_mip callbacks...")
@@ -11,22 +11,19 @@ VarIndex = Union[str, int, Tuple[Union[str, int]]]

 Solution = Dict[str, Dict[VarIndex, Optional[float]]]

-TrainingSample = TypedDict(
-    "TrainingSample",
-    {
-        "LP log": str,
-        "LP solution": Optional[Solution],
-        "LP value": Optional[float],
-        "LazyStatic: All": Set[str],
-        "LazyStatic: Enforced": Set[str],
-        "Lower bound": Optional[float],
-        "MIP log": str,
-        "Solution": Optional[Solution],
-        "Upper bound": Optional[float],
-        "slacks": Dict,
-    },
-    total=False,
-)
+@dataclass
+class TrainingSample:
+    lp_log: Optional[str] = None
+    lp_solution: Optional[Solution] = None
+    lp_value: Optional[float] = None
+    lazy_enforced: Optional[Set[str]] = None
+    lower_bound: Optional[float] = None
+    mip_log: Optional[str] = None
+    solution: Optional[Solution] = None
+    upper_bound: Optional[float] = None
+    slacks: Optional[Dict[str, float]] = None


 LPSolveStats = TypedDict(
     "LPSolveStats",
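Because every field of the new dataclass defaults to None, the scattered '"key" in sample' membership tests throughout the codebase become uniform 'is not None' checks, as the hunks above and below show. A small usage sketch (illustrative; get_inequality_slacks here is a stand-in for the solver call in the diff):

# Illustrative sketch of the new "is None" idiom (not part of the diff).
from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class TrainingSample:
    slacks: Optional[Dict[str, float]] = None

def get_inequality_slacks() -> Dict[str, float]:
    # Stand-in for solver.internal_solver.get_inequality_slacks().
    return {"c1": 0.5, "c2": 0.0}

training_data = TrainingSample()
# Old TypedDict idiom: if "slacks" not in training_data.keys(): ...
if training_data.slacks is None:  # new dataclass idiom
    training_data.slacks = get_inequality_slacks()
assert training_data.slacks["c1"] == 0.5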
@@ -28,7 +28,7 @@ def test_convert_tight_usage():
     original_upper_bound = stats["Upper bound"]

     # Should collect training data
-    assert instance.training_data[0]["slacks"]["eq_capacity"] == 0.0
+    assert instance.training_data[0].slacks["eq_capacity"] == 0.0

     # Fit and resolve
     solver.fit([instance])
@@ -12,6 +12,7 @@ from miplearn.components.steps.drop_redundant import DropRedundantInequalitiesSt
 from miplearn.instance import Instance
 from miplearn.solvers.internal import InternalSolver
 from miplearn.solvers.learning import LearningSolver
+from miplearn.types import TrainingSample, Features
 from tests.fixtures.infeasible import get_infeasible_instance
 from tests.fixtures.redundant import get_instance_with_redundancy
@@ -85,8 +86,8 @@ def test_drop_redundant():
         instance=instance,
         model=None,
         stats={},
-        features=None,
-        training_data=None,
+        features=Features(),
+        training_data=TrainingSample(),
     )

     # Should query list of constraints

@@ -129,13 +130,13 @@ def test_drop_redundant():
     )

     # LearningSolver calls after_solve
-    training_data = {}
+    training_data = TrainingSample()
     component.after_solve_mip(
         solver=solver,
         instance=instance,
         model=None,
         stats={},
-        features=None,
+        features=Features(),
         training_data=training_data,
     )

@@ -143,7 +144,7 @@ def test_drop_redundant():
     internal.get_inequality_slacks.assert_called_once()

     # Should store constraint slacks in instance object
-    assert training_data["slacks"] == {
+    assert training_data.slacks == {
         "c1": 0.5,
         "c2": 0.0,
         "c3": 0.0,

@@ -166,8 +167,8 @@ def test_drop_redundant_with_check_feasibility():
         instance=instance,
         model=None,
         stats={},
-        features=None,
-        training_data=None,
+        features=Features(),
+        training_data=TrainingSample(),
     )

     # Assert constraints are extracted
@@ -224,14 +225,14 @@ def test_x_y_fit_predict_evaluate():

     # First mock instance
     instances[0].training_data = [
-        {
-            "slacks": {
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c2": 0.05,
                 "c3": 0.00,
                 "c4": 30.0,
             }
-        }
+        )
     ]
     instances[0].get_constraint_category = Mock(
         side_effect=lambda cid: {

@@ -251,14 +252,14 @@ def test_x_y_fit_predict_evaluate():

     # Second mock instance
     instances[1].training_data = [
-        {
-            "slacks": {
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c3": 0.30,
                 "c4": 0.00,
                 "c5": 0.00,
             }
-        }
+        )
     ]
     instances[1].get_constraint_category = Mock(
         side_effect=lambda cid: {

@@ -343,22 +344,22 @@ def test_x_y_fit_predict_evaluate():
 def test_x_multiple_solves():
     instance = Mock(spec=Instance)
     instance.training_data = [
-        {
-            "slacks": {
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c2": 0.05,
                 "c3": 0.00,
                 "c4": 30.0,
             }
-        },
-        {
-            "slacks": {
+        ),
+        TrainingSample(
+            slacks={
                 "c1": 0.00,
                 "c2": 0.00,
                 "c3": 1.00,
                 "c4": 0.0,
             }
-        },
+        ),
     ]
     instance.get_constraint_category = Mock(
         side_effect=lambda cid: {
@@ -23,9 +23,9 @@ from miplearn.types import (

 @pytest.fixture
 def sample() -> TrainingSample:
-    return {
-        "LazyStatic: Enforced": {"c1", "c2", "c4"},
-    }
+    return TrainingSample(
+        lazy_enforced={"c1", "c2", "c4"},
+    )


 @pytest.fixture

@@ -101,7 +101,7 @@ def test_usage_with_solver(features: Features) -> None:
         )
     )

-    sample: TrainingSample = {}
+    sample: TrainingSample = TrainingSample()
     stats: LearningSolveStats = {}

     # LearningSolver calls before_solve_mip

@@ -152,7 +152,7 @@ def test_usage_with_solver(features: Features) -> None:
     )

     # Should update training sample
-    assert sample["LazyStatic: Enforced"] == {"c1", "c2", "c3", "c4"}
+    assert sample.lazy_enforced == {"c1", "c2", "c3", "c4"}

     # Should update stats
     assert stats["LazyStatic: Removed"] == 1
@@ -26,27 +26,27 @@ def features() -> Features:

 @pytest.fixture
 def sample() -> TrainingSample:
-    return {
-        "Lower bound": 1.0,
-        "Upper bound": 2.0,
-        "LP value": 3.0,
-    }
+    return TrainingSample(
+        lower_bound=1.0,
+        upper_bound=2.0,
+        lp_value=3.0,
+    )


 @pytest.fixture
 def sample_without_lp() -> TrainingSample:
-    return {
-        "Lower bound": 1.0,
-        "Upper bound": 2.0,
-    }
+    return TrainingSample(
+        lower_bound=1.0,
+        upper_bound=2.0,
+    )


 @pytest.fixture
 def sample_without_ub() -> TrainingSample:
-    return {
-        "Lower bound": 1.0,
-        "LP value": 3.0,
-    }
+    return TrainingSample(
+        lower_bound=1.0,
+        lp_value=3.0,
+    )


 def test_sample_xy(
@@ -38,8 +38,8 @@ def test_xy() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "Solution": {
+    sample = TrainingSample(
+        solution={
             "x": {
                 0: 0.0,
                 1: 1.0,

@@ -47,7 +47,7 @@ def test_xy() -> None:
                 3: 0.0,
             }
         },
-        "LP solution": {
+        lp_solution={
             "x": {
                 0: 0.1,
                 1: 0.1,

@@ -55,7 +55,7 @@ def test_xy() -> None:
                 3: 0.1,
             }
         },
-    }
+    )
     x_expected = {
         "default": [
             [0.0, 0.0, 0.1],

@@ -99,8 +99,8 @@ def test_xy_without_lp_solution() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "Solution": {
+    sample = TrainingSample(
+        solution={
             "x": {
                 0: 0.0,
                 1: 1.0,

@@ -108,7 +108,7 @@ def test_xy_without_lp_solution() -> None:
                 3: 0.0,
             }
         },
-    }
+    )
     x_expected = {
         "default": [
             [0.0, 0.0],

@@ -161,15 +161,15 @@ def test_predict() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "LP solution": {
+    sample = TrainingSample(
+        lp_solution={
             "x": {
                 0: 0.1,
                 1: 0.5,
                 2: 0.9,
             }
         }
-    }
+    )
     x, _ = PrimalSolutionComponent.sample_xy(features, sample)
     comp = PrimalSolutionComponent()
     comp.classifiers = {"default": clf}

@@ -254,8 +254,8 @@ def test_evaluate() -> None:
             }
         }
     )
-    sample: TrainingSample = {
-        "Solution": {
+    sample = TrainingSample(
+        solution={
             "x": {
                 0: 1.0,
                 1: 1.0,

@@ -264,7 +264,7 @@ def test_evaluate() -> None:
                 4: 1.0,
             }
         }
-    }
+    )
     ev = comp.sample_evaluate(features, sample)
     assert ev == {
         0: classifier_evaluation_dict(tp=1, fp=1, tn=3, fn=0),
@@ -39,7 +39,7 @@ def test_instance():
     instance = TravelingSalesmanInstance(n_cities, distances)
     solver = LearningSolver()
     stats = solver.solve(instance)
-    x = instance.training_data[0]["Solution"]["x"]
+    x = instance.training_data[0].solution["x"]
     assert x[0, 1] == 1.0
     assert x[0, 2] == 0.0
     assert x[0, 3] == 1.0

@@ -68,7 +68,7 @@ def test_subtour():
     solver.solve(instance)
     assert hasattr(instance, "found_violated_lazy_constraints")
     assert hasattr(instance, "found_violated_user_cuts")
-    x = instance.training_data[0]["Solution"]["x"]
+    x = instance.training_data[0].solution["x"]
     assert x[0, 1] == 1.0
     assert x[0, 4] == 1.0
     assert x[1, 2] == 1.0
@@ -27,22 +27,21 @@ def test_learning_solver():
     )

     solver.solve(instance)

     assert hasattr(instance, "features")

-    data = instance.training_data[0]
-    assert data["Solution"]["x"][0] == 1.0
-    assert data["Solution"]["x"][1] == 0.0
-    assert data["Solution"]["x"][2] == 1.0
-    assert data["Solution"]["x"][3] == 1.0
-    assert data["Lower bound"] == 1183.0
-    assert data["Upper bound"] == 1183.0
-    assert round(data["LP solution"]["x"][0], 3) == 1.000
-    assert round(data["LP solution"]["x"][1], 3) == 0.923
-    assert round(data["LP solution"]["x"][2], 3) == 1.000
-    assert round(data["LP solution"]["x"][3], 3) == 0.000
-    assert round(data["LP value"], 3) == 1287.923
-    assert len(data["MIP log"]) > 100
+    sample = instance.training_data[0]
+    assert sample.solution["x"][0] == 1.0
+    assert sample.solution["x"][1] == 0.0
+    assert sample.solution["x"][2] == 1.0
+    assert sample.solution["x"][3] == 1.0
+    assert sample.lower_bound == 1183.0
+    assert sample.upper_bound == 1183.0
+    assert round(sample.lp_solution["x"][0], 3) == 1.000
+    assert round(sample.lp_solution["x"][1], 3) == 0.923
+    assert round(sample.lp_solution["x"][2], 3) == 1.000
+    assert round(sample.lp_solution["x"][3], 3) == 0.000
+    assert round(sample.lp_value, 3) == 1287.923
+    assert len(sample.mip_log) > 100

     solver.fit([instance])
     solver.solve(instance)

@@ -73,7 +72,7 @@ def test_parallel_solve():
     assert len(results) == 10
     for instance in instances:
         data = instance.training_data[0]
-        assert len(data["Solution"]["x"].keys()) == 4
+        assert len(data.solution["x"].keys()) == 4


 def test_solve_fit_from_disk():