Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)

Commit: Remove obsolete methods

@@ -136,15 +136,6 @@ class GurobiSolver(InternalSolver):
             var.lb = value
             var.ub = value
 
-    @overrides
-    def get_dual(self, cid: str) -> float:
-        assert self.model is not None
-        c = self.model.getConstrByName(cid)
-        if self.is_infeasible():
-            return c.farkasDual
-        else:
-            return c.pi
-
     @overrides
     def get_constraint_attrs(self) -> List[str]:
         return [
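
For context, a minimal sketch (not part of this commit) of the gurobipy attributes that the removed get_dual relied on. The toy model and parameter choices below are illustrative assumptions, not MIPLearn code:

    import gurobipy as gp

    m = gp.Model()
    x = m.addVar(name="x", lb=0.0)
    m.addConstr(x >= 2.0, name="c1")
    m.setObjective(x, gp.GRB.MINIMIZE)
    m.setParam("InfUnbdInfo", 1)  # required before farkasDual can be queried
    m.optimize()

    c = m.getConstrByName("c1")
    if m.status == gp.GRB.INFEASIBLE:
        print(c.farkasDual)  # component of the infeasibility certificate
    else:
        print(c.pi)          # LP dual value of the constraint
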
@@ -175,14 +166,6 @@ class GurobiSolver(InternalSolver):
             constraints[c.constrName] = constr
         return constraints
 
-    @overrides
-    def get_sense(self) -> str:
-        assert self.model is not None
-        if self.model.modelSense == 1:
-            return "min"
-        else:
-            return "max"
-
     @overrides
     def get_solution(self) -> Optional[Solution]:
         assert self.model is not None
@@ -224,12 +207,6 @@ class GurobiSolver(InternalSolver):
             "value",
         ]
 
-    @overrides
-    def get_variable_names(self) -> List[VariableName]:
-        self._raise_if_callback()
-        assert self.model is not None
-        return [v.varName for v in self.model.getVars()]
-
     @overrides
     def get_variables(self) -> Dict[str, Variable]:
         assert self.model is not None
@@ -124,10 +124,8 @@ class InternalSolver(ABC, EnforceOverrides):
         """
         Sets the warm start to be used by the solver.
 
-        The solution should be a dictionary following the same format as the
-        one produced by `get_solution`. Only one warm start is supported.
-        Calling this function when a warm start already exists will
-        remove the previous warm start.
+        Only one warm start is supported. Calling this function when a warm start
+        already exists will remove the previous warm start.
         """
         pass
 
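
As a usage illustration, not part of this commit: a warm start in the dictionary format produced by get_solution might be supplied as below. The variable names and the already-loaded `solver` object are assumptions.

    # Hypothetical sketch: `solver` is assumed to be an InternalSolver with an
    # instance already loaded; variable names are illustrative.
    warm_start = {
        "x[0]": 1.0,   # suggested value for x[0]
        "x[1]": 0.0,
        "x[2]": None,  # no suggestion for this variable
    }
    solver.set_warm_start(warm_start)  # replaces any previously supplied warm start
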
@@ -154,11 +152,8 @@ class InternalSolver(ABC, EnforceOverrides):
     @abstractmethod
     def fix(self, solution: Solution) -> None:
         """
-        Fixes the values of a subset of decision variables.
-
-        The values should be provided in the dictionary format generated by
-        `get_solution`. Missing values in the solution indicate variables
-        that should be left free.
+        Fixes the values of a subset of decision variables. Missing values in the
+        solution indicate variables that should be left free.
         """
         pass
 
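
A similarly hedged sketch, not part of this commit: fixing only part of a solution leaves the remaining variables free. Again, `solver` and the variable names are assumptions.

    # Hypothetical sketch: x[0] and x[1] are fixed at the given values; any
    # variable not present in the dictionary (e.g. x[2]) remains free.
    solver.fix({"x[0]": 1.0, "x[1]": 0.0})
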
@@ -170,9 +165,7 @@ class InternalSolver(ABC, EnforceOverrides):
         with higher priority are picked first, given that they are fractional.
         Ties are solved arbitrarily. By default, all variables have priority zero.
 
-        The priorities should be provided in the dictionary format generated by
-        `get_solution`. Missing values indicate variables whose priorities
-        should not be modified.
+        Missing values indicate variables whose priorities should not be modified.
         """
         raise NotImplementedError()
 
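
To illustrate the documented rule that, among fractional variables, the one with the highest priority is picked first, here is a small self-contained sketch; it is not part of this commit, and the data, names, and tolerance are made up:

    priorities = {"x[0]": 10, "x[1]": 0, "x[2]": 5}      # default priority is zero
    lp_values = {"x[0]": 1.0, "x[1]": 0.3, "x[2]": 0.7}  # current LP relaxation values

    def is_fractional(v: float, tol: float = 1e-6) -> bool:
        return min(v - int(v), int(v) + 1 - v) > tol

    fractional = [name for name, v in lp_values.items() if is_fractional(v)]
    branch_var = max(fractional, key=lambda name: priorities.get(name, 0))
    print(branch_var)  # "x[2]": highest priority among the fractional variables
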
@@ -216,34 +209,6 @@ class InternalSolver(ABC, EnforceOverrides):
         """
         pass
 
-    @abstractmethod
-    def get_dual(self, cid: str) -> float:
-        """
-        If the model is feasible and has been solved to optimality, returns the
-        optimal value of the dual variable associated with this constraint. If the
-        model is infeasible, returns a portion of the infeasibility certificate
-        corresponding to the given constraint.
-
-        Only available for relaxed problems. Must be called after solve.
-        """
-        pass
-
-    @abstractmethod
-    def get_sense(self) -> str:
-        """
-        Returns the sense of the problem (either "min" or "max").
-        """
-        pass
-
-    @abstractmethod
-    def get_variable_names(self) -> List[VariableName]:
-        """
-        Returns a list containing the names of all variables in the model. This
-        method is used by the ML components to query what variables are there in the
-        model before a solution is available.
-        """
-        pass
-
     @abstractmethod
     def clone(self) -> "InternalSolver":
         """
@@ -4,7 +4,7 @@
 
 import logging
 import traceback
-from typing import Optional, List, Any, cast, Callable, Dict, Tuple
+from typing import Optional, List, Any, cast, Dict, Tuple
 
 from p_tqdm import p_map
 
@@ -13,7 +13,7 @@ from miplearn.components.dynamic_lazy import DynamicLazyConstraintsComponent
 from miplearn.components.dynamic_user_cuts import UserCutsComponent
 from miplearn.components.objective import ObjectiveValueComponent
 from miplearn.components.primal import PrimalSolutionComponent
-from miplearn.features import FeaturesExtractor, TrainingSample, Sample
+from miplearn.features import FeaturesExtractor, Sample
 from miplearn.instance.base import Instance
 from miplearn.instance.picklegz import PickleGzInstance
 from miplearn.solvers import _RedirectOutput
@@ -138,9 +138,7 @@ class LearningSolver:
 
         # Initialize training sample
         # -------------------------------------------------------
-        training_sample = TrainingSample()
         sample = Sample()
-        instance.training_data.append(training_sample)
         instance.samples.append(sample)
 
         # Initialize stats
@@ -160,7 +158,6 @@ class LearningSolver:
         logger.info("Extracting features (after-load)...")
         features = FeaturesExtractor(self.internal_solver).extract(instance)
         features.extra = {}
-        instance.features.__dict__ = features.__dict__
         sample.after_load = features
 
         callback_args = (
@@ -171,15 +168,6 @@ class LearningSolver:
             sample,
         )
 
-        callback_args_old = (
-            self,
-            instance,
-            model,
-            stats,
-            instance.features,
-            training_sample,
-        )
-
         # Solve root LP relaxation
         # -------------------------------------------------------
         lp_stats = None
@@ -187,19 +175,14 @@ class LearningSolver:
             logger.debug("Running before_solve_lp callbacks...")
             for component in self.components.values():
                 component.before_solve_lp(*callback_args)
-                component.before_solve_lp_old(*callback_args_old)
 
             logger.info("Solving root LP relaxation...")
             lp_stats = self.internal_solver.solve_lp(tee=tee)
             stats.update(cast(LearningSolveStats, lp_stats.__dict__))
-            training_sample.lp_solution = self.internal_solver.get_solution()
-            training_sample.lp_value = lp_stats.lp_value
-            training_sample.lp_log = lp_stats.lp_log
 
             logger.debug("Running after_solve_lp callbacks...")
             for component in self.components.values():
                 component.after_solve_lp(*callback_args)
-                component.after_solve_lp_old(*callback_args_old)
 
         # Extract features (after-lp)
         # -------------------------------------------------------
@@ -245,7 +228,6 @@ class LearningSolver:
         logger.debug("Running before_solve_mip callbacks...")
         for component in self.components.values():
             component.before_solve_mip(*callback_args)
-            component.before_solve_mip_old(*callback_args_old)
 
         # Solve MIP
         # -------------------------------------------------------
@@ -272,19 +254,11 @@ class LearningSolver:
         features.extra = {}
         sample.after_mip = features
 
-        # Add some information to training_sample
-        # -------------------------------------------------------
-        training_sample.lower_bound = mip_stats.mip_lower_bound
-        training_sample.upper_bound = mip_stats.mip_upper_bound
-        training_sample.mip_log = mip_stats.mip_log
-        training_sample.solution = self.internal_solver.get_solution()
-
        # After-solve callbacks
         # -------------------------------------------------------
         logger.debug("Calling after_solve_mip callbacks...")
         for component in self.components.values():
             component.after_solve_mip(*callback_args)
-            component.after_solve_mip_old(*callback_args_old)
 
         # Flush
         # -------------------------------------------------------
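
The hunks above keep the callback order that solve() drives for every registered component: before_solve_lp, after_solve_lp, before_solve_mip, after_solve_mip. A minimal stand-in component, not part of this commit and with simplified signatures, makes that order explicit:

    # Hypothetical sketch: a stand-in that only logs the callback order.
    # Real components subclass the Component base class and receive the
    # callback_args tuple shown in the hunks above.
    class LoggingComponent:
        def before_solve_lp(self, *args): print("1. before LP relaxation")
        def after_solve_lp(self, *args): print("2. after LP relaxation")
        def before_solve_mip(self, *args): print("3. before MIP")
        def after_solve_mip(self, *args): print("4. after MIP")
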
@@ -414,12 +388,11 @@ class LearningSolver:
 
     def fit(self, training_instances: List[Instance]) -> None:
         if len(training_instances) == 0:
-            logger.warn("Empty list of training instances provided. Skipping.")
+            logger.warning("Empty list of training instances provided. Skipping.")
             return
         for component in self.components.values():
             logger.info(f"Fitting {component.__class__.__name__}...")
             component.fit(training_instances)
-            component.fit_old(training_instances)
 
     def _add_component(self, component: Component) -> None:
         name = component.__class__.__name__
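
For orientation, the train-then-solve loop that fit() supports, as a hedged sketch that is not part of this commit; building the Instance objects is application-specific and omitted here:

    from miplearn import LearningSolver

    solver = LearningSolver()
    for instance in training_instances:   # previously constructed Instance objects
        solver.solve(instance)            # collects training samples
    solver.fit(training_instances)        # trains every registered component
    solver.solve(test_instance)           # solves a new instance with ML predictions
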
@@ -155,11 +155,6 @@ class BasePyomoSolver(InternalSolver):
             "slack",
         ]
 
-    @overrides
-    def get_dual(self, cid: str) -> float:
-        constr = self._cname_to_constr[cid]
-        return self._pyomo_solver.dual[constr]
-
     @overrides
     def get_solution(self) -> Optional[Solution]:
         assert self.model is not None
@@ -173,21 +168,6 @@ class BasePyomoSolver(InternalSolver):
                 solution[f"{var}[{index}]"] = var[index].value
         return solution
 
-    @overrides
-    def get_variable_names(self) -> List[VariableName]:
-        assert self.model is not None
-        variables: List[VariableName] = []
-        for var in self.model.component_objects(Var):
-            for index in var:
-                if var[index].fixed:
-                    continue
-                variables += [f"{var}[{index}]"]
-        return variables
-
-    @overrides
-    def get_sense(self) -> str:
-        return self._obj_sense
-
     @overrides
     def get_variables(self) -> Dict[str, Variable]:
         assert self.model is not None
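
The removed Pyomo code builds variable names as f"{var}[{index}]". A small self-contained sketch, not part of this commit, showing how that convention maps a Pyomo model to name strings:

    from pyomo.environ import ConcreteModel, Var

    m = ConcreteModel()
    m.x = Var([0, 1, 2])
    names = [f"{var}[{index}]" for var in m.component_objects(Var) for index in var]
    print(names)  # ['x[0]', 'x[1]', 'x[2]']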