Mirror of https://github.com/ANL-CEEESA/MIPLearn.git, synced 2025-12-06 01:18:52 -06:00
Use compact variable features everywhere
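The change replaces the per-variable `variables_old` dictionary (one `Variable` object per name) with a single `VariableFeatures` object holding parallel tuples, as the test fixtures below illustrate. A minimal sketch of the two shapes, assuming simplified dataclass definitions (the real classes in miplearn.features carry more fields):

    from dataclasses import dataclass
    from typing import Optional, Tuple

    # Simplified stand-ins for the classes in miplearn.features; only the
    # fields exercised by the tests below are shown.
    @dataclass
    class Variable:
        category: Optional[str] = None
        value: Optional[float] = None

    @dataclass
    class VariableFeatures:
        names: Optional[Tuple[str, ...]] = None
        categories: Optional[Tuple[Optional[str], ...]] = None
        values: Optional[Tuple[float, ...]] = None

    # Old representation: one object per variable, keyed by name.
    variables_old = {
        "x[0]": Variable(category="default"),
        "x[1]": Variable(category=None),
        "x[2]": Variable(category="default"),
        "x[3]": Variable(category="default"),
    }

    # New compact representation: parallel tuples, one entry per variable.
    variables = VariableFeatures(
        names=("x[0]", "x[1]", "x[2]", "x[3]"),
        categories=("default", None, "default", "default"),
    )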
@@ -13,10 +13,10 @@ from miplearn.classifiers.threshold import Threshold
from miplearn.components import classifier_evaluation_dict
from miplearn.components.primal import PrimalSolutionComponent
from miplearn.features import (
    Variable,
    Features,
    Sample,
    InstanceFeatures,
    VariableFeatures,
)
from miplearn.problems.tsp import TravelingSalesmanGenerator
from miplearn.solvers.learning import LearningSolver
@@ -28,39 +28,37 @@ def sample() -> Sample:
    sample = Sample(
        after_load=Features(
            instance=InstanceFeatures(),
            variables_old={
                "x[0]": Variable(category="default"),
                "x[1]": Variable(category=None),
                "x[2]": Variable(category="default"),
                "x[3]": Variable(category="default"),
            },
            variables=VariableFeatures(
                names=("x[0]", "x[1]", "x[2]", "x[3]"),
                categories=("default", None, "default", "default"),
            ),
        ),
        after_lp=Features(
            variables_old={
                "x[0]": Variable(),
                "x[1]": Variable(),
                "x[2]": Variable(),
                "x[3]": Variable(),
            },
            variables=VariableFeatures(),
        ),
        after_mip=Features(
            variables_old={
                "x[0]": Variable(value=0.0),
                "x[1]": Variable(value=1.0),
                "x[2]": Variable(value=1.0),
                "x[3]": Variable(value=0.0),
            },
            variables=VariableFeatures(
                names=("x[0]", "x[1]", "x[2]", "x[3]"),
                values=(0.0, 1.0, 1.0, 0.0),
            ),
        ),
    )
    sample.after_load.instance.to_list = Mock(return_value=[5.0])  # type: ignore
    sample.after_lp.variables_old["x[0]"].to_list = Mock(  # type: ignore
        return_value=[0.0, 0.0]
    )
    sample.after_load.variables.to_list = Mock(  # type: ignore
        side_effect=lambda i: [
            [0.0, 0.0],
            None,
            [1.0, 0.0],
            [1.0, 1.0],
        ][i]
    )
    sample.after_lp.variables_old["x[2]"].to_list = Mock(  # type: ignore
        return_value=[1.0, 0.0]
    )
    sample.after_lp.variables_old["x[3]"].to_list = Mock(  # type: ignore
        return_value=[1.0, 1.0]
    )
    sample.after_lp.variables.to_list = Mock(  # type: ignore
        side_effect=lambda i: [
            [2.0, 2.0],
            None,
            [3.0, 2.0],
            [3.0, 3.0],
        ][i]
    )
    return sample

@@ -68,9 +66,9 @@ def sample() -> Sample:
def test_xy(sample: Sample) -> None:
    x_expected = {
        "default": [
            [5.0, 0.0, 0.0],
            [5.0, 1.0, 0.0],
            [5.0, 1.0, 1.0],
            [5.0, 0.0, 0.0, 2.0, 2.0],
            [5.0, 1.0, 0.0, 3.0, 2.0],
            [5.0, 1.0, 1.0, 3.0, 3.0],
        ]
    }
    y_expected = {

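For context on the new x_expected rows above: they appear to be the concatenation of the instance feature list ([5.0]) with each categorized variable's after_load and after_lp feature lists from the mocked to_list calls. A minimal sketch of that assembly; the helper name build_x is hypothetical, not part of MIPLearn:

    from typing import Dict, List, Optional

    # Hypothetical helper illustrating how the expected x-vectors can be
    # assembled; PrimalSolutionComponent's actual sample_xy may differ.
    def build_x(
        instance_features: List[float],
        after_load: List[Optional[List[float]]],
        after_lp: List[Optional[List[float]]],
        categories: List[Optional[str]],
    ) -> Dict[str, List[List[float]]]:
        x: Dict[str, List[List[float]]] = {}
        for i, category in enumerate(categories):
            if category is None:
                continue  # variables without a category are skipped
            load, lp = after_load[i], after_lp[i]
            assert load is not None and lp is not None
            x.setdefault(category, []).append(instance_features + load + lp)
        return x

    x = build_x(
        instance_features=[5.0],
        after_load=[[0.0, 0.0], None, [1.0, 0.0], [1.0, 1.0]],
        after_lp=[[2.0, 2.0], None, [3.0, 2.0], [3.0, 3.0]],
        categories=["default", None, "default", "default"],
    )
    assert x == {
        "default": [
            [5.0, 0.0, 0.0, 2.0, 2.0],
            [5.0, 1.0, 0.0, 3.0, 2.0],
            [5.0, 1.0, 1.0, 3.0, 3.0],
        ]
    }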
@@ -43,13 +43,8 @@ def test_instance() -> None:
    assert instance.samples[0].after_mip is not None
    features = instance.samples[0].after_mip
    assert features is not None
    assert features.variables_old is not None
    assert features.variables_old["x[(0, 1)]"].value == 1.0
    assert features.variables_old["x[(0, 2)]"].value == 0.0
    assert features.variables_old["x[(0, 3)]"].value == 1.0
    assert features.variables_old["x[(1, 2)]"].value == 1.0
    assert features.variables_old["x[(1, 3)]"].value == 0.0
    assert features.variables_old["x[(2, 3)]"].value == 1.0
    assert features.variables is not None
    assert features.variables.values == (1.0, 0.0, 1.0, 1.0, 0.0, 1.0)
    assert features.mip_solve is not None
    assert features.mip_solve.mip_lower_bound == 4.0
    assert features.mip_solve.mip_upper_bound == 4.0

@@ -79,12 +74,23 @@ def test_subtour() -> None:
    lazy_enforced = features.extra["lazy_enforced"]
    assert lazy_enforced is not None
    assert len(lazy_enforced) > 0
    assert features.variables_old is not None
    assert features.variables_old["x[(0, 1)]"].value == 1.0
    assert features.variables_old["x[(0, 4)]"].value == 1.0
    assert features.variables_old["x[(1, 2)]"].value == 1.0
    assert features.variables_old["x[(2, 3)]"].value == 1.0
    assert features.variables_old["x[(3, 5)]"].value == 1.0
    assert features.variables_old["x[(4, 5)]"].value == 1.0
    assert features.variables is not None
    assert features.variables.values == (
        1.0,
        0.0,
        0.0,
        1.0,
        0.0,
        1.0,
        0.0,
        0.0,
        0.0,
        1.0,
        0.0,
        0.0,
        0.0,
        1.0,
        1.0,
    )
    solver.fit([instance])
    solver.solve(instance)

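A note on the compact checks above: the tuple in features.variables.values lines up positionally with features.variables.names, so the per-name assertions on variables_old translate into index lookups. A small illustrative sketch; the names tuple here is assumed to follow the order of the variables_old keys shown in test_instance and is not printed in the diff:

    # Assumed ordering of the TSP edge variables for the 4-city instance.
    names = ("x[(0, 1)]", "x[(0, 2)]", "x[(0, 3)]", "x[(1, 2)]", "x[(1, 3)]", "x[(2, 3)]")
    values = (1.0, 0.0, 1.0, 1.0, 0.0, 1.0)

    def value_of(name: str) -> float:
        # Equivalent of features.variables_old[name].value in the old API.
        return values[names.index(name)]

    assert value_of("x[(0, 1)]") == 1.0
    assert value_of("x[(0, 2)]") == 0.0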
@@ -16,6 +16,7 @@ from miplearn.solvers.internal import InternalSolver
from miplearn.solvers.learning import LearningSolver

# noinspection PyUnresolvedReferences
from miplearn.solvers.tests import _round
from tests.solvers.test_internal_solver import internal_solvers

logger = logging.getLogger(__name__)
@@ -39,12 +40,9 @@ def test_learning_solver(

    after_mip = sample.after_mip
    assert after_mip is not None
    assert after_mip.variables_old is not None
    assert after_mip.variables is not None
    assert after_mip.variables.values == (1.0, 0.0, 1.0, 1.0, 61.0)
    assert after_mip.mip_solve is not None
    assert after_mip.variables_old["x[0]"].value == 1.0
    assert after_mip.variables_old["x[1]"].value == 0.0
    assert after_mip.variables_old["x[2]"].value == 1.0
    assert after_mip.variables_old["x[3]"].value == 1.0
    assert after_mip.mip_solve.mip_lower_bound == 1183.0
    assert after_mip.mip_solve.mip_upper_bound == 1183.0
    assert after_mip.mip_solve.mip_log is not None

@@ -52,16 +50,9 @@ def test_learning_solver(

    after_lp = sample.after_lp
    assert after_lp is not None
    assert after_lp.variables_old is not None
    assert after_lp.variables is not None
    assert _round(after_lp.variables.values) == (1.0, 0.923077, 1.0, 0.0, 67.0)
    assert after_lp.lp_solve is not None
    assert after_lp.variables_old["x[0]"].value is not None
    assert after_lp.variables_old["x[1]"].value is not None
    assert after_lp.variables_old["x[2]"].value is not None
    assert after_lp.variables_old["x[3]"].value is not None
    assert round(after_lp.variables_old["x[0]"].value, 3) == 1.000
    assert round(after_lp.variables_old["x[1]"].value, 3) == 0.923
    assert round(after_lp.variables_old["x[2]"].value, 3) == 1.000
    assert round(after_lp.variables_old["x[3]"].value, 3) == 0.000
    assert after_lp.lp_solve.lp_value is not None
    assert round(after_lp.lp_solve.lp_value, 3) == 1287.923
    assert after_lp.lp_solve.lp_log is not None

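The _round helper imported from miplearn.solvers.tests is applied above to the whole values tuple; judging from the expected result (1.0, 0.923077, 1.0, 0.0, 67.0), it appears to round each entry to six decimal places. A hypothetical re-implementation under that assumption, not the library's actual code:

    from typing import Optional, Tuple

    def _round_sketch(values: Tuple[Optional[float], ...]) -> Tuple[Optional[float], ...]:
        # Assumed behaviour: round each entry to 6 decimals, pass None through.
        return tuple(None if v is None else round(v, 6) for v in values)

    assert _round_sketch((1.0, 0.9230769230769231, 1.0, 0.0, 67.0)) == (
        1.0,
        0.923077,
        1.0,
        0.0,
        67.0,
    )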
@@ -5,14 +5,12 @@
from miplearn.features import (
    FeaturesExtractor,
    InstanceFeatures,
    Variable,
    Constraint,
    VariableFeatures,
)
from miplearn.solvers.gurobi import GurobiSolver
from miplearn.solvers.tests import (
    assert_equals,
    _round_variables,
    _round_constraints,
    _round,
)
@@ -28,7 +26,7 @@ def test_knapsack() -> None:
    solver.solve_lp()

    features = FeaturesExtractor(solver).extract(instance)
    assert features.variables_old is not None
    assert features.variables is not None
    assert features.constraints is not None
    assert features.instance is not None

@@ -57,6 +55,13 @@ def test_knapsack() -> None:
                None,
            ),
            values=(1.0, 0.923077, 1.0, 0.0, 67.0),
            alvarez_2017=[
                [1.0, 0.32899, 0.0, 0.0, 1.0, 1.0, 5.265874, 46.051702],
                [1.0, 0.229316, 0.0, 0.076923, 1.0, 1.0, 3.532875, 5.388476],
                [1.0, 0.298371, 0.0, 0.0, 1.0, 1.0, 5.232342, 46.051702],
                [1.0, 0.143322, 0.0, 0.0, 1.0, -1.0, 46.051702, 3.16515],
                [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0],
            ],
        ),
    )
    assert_equals(
