Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)

Rename features.variables to variables_old; update FeatureExtractor
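In rough terms: the per-variable dictionary previously stored as features.variables is kept under features.variables_old, and FeaturesExtractor now fills features.variables with a columnar VariableFeatures object, one tuple per attribute. A minimal sketch of the two layouts, using only fields that appear in the diff below (constructor arguments abbreviated, not the full field list):

from miplearn.features import Variable, VariableFeatures

# Old layout, now exposed as features.variables_old: one Variable object per name.
variables_old = {
    "x[0]": Variable(category="default", value=1.0),
    "x[1]": Variable(category="default", value=0.923077),
}

# New layout, produced by FeaturesExtractor as features.variables:
# parallel tuples, aligned by position across all variables.
variables = VariableFeatures(
    names=("x[0]", "x[1]"),
    categories=("default", "default"),
    values=(1.0, 0.923077),
)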
@@ -28,7 +28,7 @@ def sample() -> Sample:
     sample = Sample(
         after_load=Features(
             instance=InstanceFeatures(),
-            variables={
+            variables_old={
                 "x[0]": Variable(category="default"),
                 "x[1]": Variable(category=None),
                 "x[2]": Variable(category="default"),
@@ -36,7 +36,7 @@ def sample() -> Sample:
             },
         ),
         after_lp=Features(
-            variables={
+            variables_old={
                 "x[0]": Variable(),
                 "x[1]": Variable(),
                 "x[2]": Variable(),
@@ -44,7 +44,7 @@ def sample() -> Sample:
             },
         ),
         after_mip=Features(
-            variables={
+            variables_old={
                 "x[0]": Variable(value=0.0),
                 "x[1]": Variable(value=1.0),
                 "x[2]": Variable(value=1.0),
@@ -53,13 +53,13 @@ def sample() -> Sample:
         ),
     )
     sample.after_load.instance.to_list = Mock(return_value=[5.0])  # type: ignore
-    sample.after_lp.variables["x[0]"].to_list = Mock(  # type: ignore
+    sample.after_lp.variables_old["x[0]"].to_list = Mock(  # type: ignore
         return_value=[0.0, 0.0]
     )
-    sample.after_lp.variables["x[2]"].to_list = Mock(  # type: ignore
+    sample.after_lp.variables_old["x[2]"].to_list = Mock(  # type: ignore
         return_value=[1.0, 0.0]
     )
-    sample.after_lp.variables["x[3]"].to_list = Mock(  # type: ignore
+    sample.after_lp.variables_old["x[3]"].to_list = Mock(  # type: ignore
         return_value=[1.0, 1.0]
     )
     return sample

@@ -43,13 +43,13 @@ def test_instance() -> None:
     assert instance.samples[0].after_mip is not None
     features = instance.samples[0].after_mip
     assert features is not None
-    assert features.variables is not None
-    assert features.variables["x[(0, 1)]"].value == 1.0
-    assert features.variables["x[(0, 2)]"].value == 0.0
-    assert features.variables["x[(0, 3)]"].value == 1.0
-    assert features.variables["x[(1, 2)]"].value == 1.0
-    assert features.variables["x[(1, 3)]"].value == 0.0
-    assert features.variables["x[(2, 3)]"].value == 1.0
+    assert features.variables_old is not None
+    assert features.variables_old["x[(0, 1)]"].value == 1.0
+    assert features.variables_old["x[(0, 2)]"].value == 0.0
+    assert features.variables_old["x[(0, 3)]"].value == 1.0
+    assert features.variables_old["x[(1, 2)]"].value == 1.0
+    assert features.variables_old["x[(1, 3)]"].value == 0.0
+    assert features.variables_old["x[(2, 3)]"].value == 1.0
     assert features.mip_solve is not None
     assert features.mip_solve.mip_lower_bound == 4.0
     assert features.mip_solve.mip_upper_bound == 4.0
@@ -79,12 +79,12 @@ def test_subtour() -> None:
     lazy_enforced = features.extra["lazy_enforced"]
     assert lazy_enforced is not None
    assert len(lazy_enforced) > 0
-    assert features.variables is not None
-    assert features.variables["x[(0, 1)]"].value == 1.0
-    assert features.variables["x[(0, 4)]"].value == 1.0
-    assert features.variables["x[(1, 2)]"].value == 1.0
-    assert features.variables["x[(2, 3)]"].value == 1.0
-    assert features.variables["x[(3, 5)]"].value == 1.0
-    assert features.variables["x[(4, 5)]"].value == 1.0
+    assert features.variables_old is not None
+    assert features.variables_old["x[(0, 1)]"].value == 1.0
+    assert features.variables_old["x[(0, 4)]"].value == 1.0
+    assert features.variables_old["x[(1, 2)]"].value == 1.0
+    assert features.variables_old["x[(2, 3)]"].value == 1.0
+    assert features.variables_old["x[(3, 5)]"].value == 1.0
+    assert features.variables_old["x[(4, 5)]"].value == 1.0
     solver.fit([instance])
     solver.solve(instance)

@@ -39,12 +39,12 @@ def test_learning_solver(

    after_mip = sample.after_mip
    assert after_mip is not None
-    assert after_mip.variables is not None
+    assert after_mip.variables_old is not None
    assert after_mip.mip_solve is not None
-    assert after_mip.variables["x[0]"].value == 1.0
-    assert after_mip.variables["x[1]"].value == 0.0
-    assert after_mip.variables["x[2]"].value == 1.0
-    assert after_mip.variables["x[3]"].value == 1.0
+    assert after_mip.variables_old["x[0]"].value == 1.0
+    assert after_mip.variables_old["x[1]"].value == 0.0
+    assert after_mip.variables_old["x[2]"].value == 1.0
+    assert after_mip.variables_old["x[3]"].value == 1.0
    assert after_mip.mip_solve.mip_lower_bound == 1183.0
    assert after_mip.mip_solve.mip_upper_bound == 1183.0
    assert after_mip.mip_solve.mip_log is not None
@@ -52,16 +52,16 @@ def test_learning_solver(

    after_lp = sample.after_lp
    assert after_lp is not None
-    assert after_lp.variables is not None
+    assert after_lp.variables_old is not None
    assert after_lp.lp_solve is not None
-    assert after_lp.variables["x[0]"].value is not None
-    assert after_lp.variables["x[1]"].value is not None
-    assert after_lp.variables["x[2]"].value is not None
-    assert after_lp.variables["x[3]"].value is not None
-    assert round(after_lp.variables["x[0]"].value, 3) == 1.000
-    assert round(after_lp.variables["x[1]"].value, 3) == 0.923
-    assert round(after_lp.variables["x[2]"].value, 3) == 1.000
-    assert round(after_lp.variables["x[3]"].value, 3) == 0.000
+    assert after_lp.variables_old["x[0]"].value is not None
+    assert after_lp.variables_old["x[1]"].value is not None
+    assert after_lp.variables_old["x[2]"].value is not None
+    assert after_lp.variables_old["x[3]"].value is not None
+    assert round(after_lp.variables_old["x[0]"].value, 3) == 1.000
+    assert round(after_lp.variables_old["x[1]"].value, 3) == 0.923
+    assert round(after_lp.variables_old["x[2]"].value, 3) == 1.000
+    assert round(after_lp.variables_old["x[3]"].value, 3) == 0.000
    assert after_lp.lp_solve.lp_value is not None
    assert round(after_lp.lp_solve.lp_value, 3) == 1287.923
    assert after_lp.lp_solve.lp_log is not None

@@ -7,9 +7,15 @@ from miplearn.features import (
     InstanceFeatures,
     Variable,
     Constraint,
+    VariableFeatures,
 )
 from miplearn.solvers.gurobi import GurobiSolver
-from miplearn.solvers.tests import assert_equals, _round_variables, _round_constraints
+from miplearn.solvers.tests import (
+    assert_equals,
+    _round_variables,
+    _round_constraints,
+    _round,
+)

 inf = float("inf")

@@ -22,113 +28,36 @@ def test_knapsack() -> None:
     solver.solve_lp()

     features = FeaturesExtractor(solver).extract(instance)
-    assert features.variables is not None
+    assert features.variables_old is not None
     assert features.constraints is not None
     assert features.instance is not None

     assert_equals(
-        _round_variables(features.variables),
-        {
-            "x[0]": Variable(
-                basis_status="U",
-                category="default",
-                lower_bound=0.0,
-                obj_coeff=505.0,
-                reduced_cost=193.615385,
-                sa_lb_down=-inf,
-                sa_lb_up=1.0,
-                sa_obj_down=311.384615,
-                sa_obj_up=inf,
-                sa_ub_down=0.913043,
-                sa_ub_up=2.043478,
-                type="B",
-                upper_bound=1.0,
-                user_features=[23.0, 505.0],
-                value=1.0,
-                alvarez_2017=[1.0, 0.32899, 0.0, 0.0, 1.0, 1.0, 5.265874, 46.051702],
+        _round(features.variables),
+        VariableFeatures(
+            names=("x[0]", "x[1]", "x[2]", "x[3]", "z"),
+            basis_status=("U", "B", "U", "L", "U"),
+            categories=("default", "default", "default", "default", None),
+            lower_bounds=(0.0, 0.0, 0.0, 0.0, 0.0),
+            obj_coeffs=(505.0, 352.0, 458.0, 220.0, 0.0),
+            reduced_costs=(193.615385, 0.0, 187.230769, -23.692308, 13.538462),
+            sa_lb_down=(-inf, -inf, -inf, -0.111111, -inf),
+            sa_lb_up=(1.0, 0.923077, 1.0, 1.0, 67.0),
+            sa_obj_down=(311.384615, 317.777778, 270.769231, -inf, -13.538462),
+            sa_obj_up=(inf, 570.869565, inf, 243.692308, inf),
+            sa_ub_down=(0.913043, 0.923077, 0.9, 0.0, 43.0),
+            sa_ub_up=(2.043478, inf, 2.2, inf, 69.0),
+            types=("B", "B", "B", "B", "C"),
+            upper_bounds=(1.0, 1.0, 1.0, 1.0, 67.0),
+            user_features=(
+                (23.0, 505.0),
+                (26.0, 352.0),
+                (20.0, 458.0),
+                (18.0, 220.0),
+                None,
             ),
-            "x[1]": Variable(
-                basis_status="B",
-                category="default",
-                lower_bound=0.0,
-                obj_coeff=352.0,
-                reduced_cost=0.0,
-                sa_lb_down=-inf,
-                sa_lb_up=0.923077,
-                sa_obj_down=317.777778,
-                sa_obj_up=570.869565,
-                sa_ub_down=0.923077,
-                sa_ub_up=inf,
-                type="B",
-                upper_bound=1.0,
-                user_features=[26.0, 352.0],
-                value=0.923077,
-                alvarez_2017=[
-                    1.0,
-                    0.229316,
-                    0.0,
-                    0.076923,
-                    1.0,
-                    1.0,
-                    3.532875,
-                    5.388476,
-                ],
-            ),
-            "x[2]": Variable(
-                basis_status="U",
-                category="default",
-                lower_bound=0.0,
-                obj_coeff=458.0,
-                reduced_cost=187.230769,
-                sa_lb_down=-inf,
-                sa_lb_up=1.0,
-                sa_obj_down=270.769231,
-                sa_obj_up=inf,
-                sa_ub_down=0.9,
-                sa_ub_up=2.2,
-                type="B",
-                upper_bound=1.0,
-                user_features=[20.0, 458.0],
-                value=1.0,
-                alvarez_2017=[1.0, 0.298371, 0.0, 0.0, 1.0, 1.0, 5.232342, 46.051702],
-            ),
-            "x[3]": Variable(
-                basis_status="L",
-                category="default",
-                lower_bound=0.0,
-                obj_coeff=220.0,
-                reduced_cost=-23.692308,
-                sa_lb_down=-0.111111,
-                sa_lb_up=1.0,
-                sa_obj_down=-inf,
-                sa_obj_up=243.692308,
-                sa_ub_down=0.0,
-                sa_ub_up=inf,
-                type="B",
-                upper_bound=1.0,
-                user_features=[18.0, 220.0],
-                value=0.0,
-                alvarez_2017=[1.0, 0.143322, 0.0, 0.0, 1.0, -1.0, 46.051702, 3.16515],
-            ),
-            "z": Variable(
-                basis_status="U",
-                category=None,
-                lower_bound=0.0,
-                obj_coeff=0.0,
-                reduced_cost=13.538462,
-                sa_lb_down=-inf,
-                sa_lb_up=67.0,
-                sa_obj_down=-13.538462,
-                sa_obj_up=inf,
-                sa_ub_down=43.0,
-                sa_ub_up=69.0,
-                type="C",
-                upper_bound=67.0,
-                user_features=None,
-                value=67.0,
-                alvarez_2017=[0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0],
-            ),
-        },
+            values=(1.0, 0.923077, 1.0, 0.0, 67.0),
+        ),
     )
     assert_equals(
         _round_constraints(features.constraints),
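
For reference, a per-variable view can still be recovered from the columnar tuples shown above by zipping names against any attribute; a small sketch with a hypothetical helper (not part of MIPLearn):

from miplearn.features import VariableFeatures

def values_by_name(variables: VariableFeatures) -> dict:
    # Pair each variable name with its solution value from the parallel tuples.
    assert variables.names is not None and variables.values is not None
    return dict(zip(variables.names, variables.values))

# With the tuples from the test above:
# values_by_name(features.variables)["x[1]"] == 0.923077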