Mirror of https://github.com/ANL-CEEESA/MIPLearn.git (synced 2025-12-06 01:18:52 -06:00)
Simplify PrimalSolutionComponent; remove dynamic thresholds
@@ -23,18 +23,13 @@ class PrimalSolutionComponent(Component):
     def __init__(self,
                  classifier=AdaptiveClassifier(),
                  mode="exact",
-                 max_fpr=[1e-3, 1e-3],
-                 min_threshold=[0.75, 0.75],
-                 dynamic_thresholds=True,
+                 threshold=0.50,
                  ):
         self.mode = mode
         self.is_warm_start_available = False
-        self.max_fpr = max_fpr
-        self.min_threshold = min_threshold
-        self.thresholds = {}
         self.classifiers = {}
+        self.threshold = threshold
         self.classifier_prototype = classifier
-        self.dynamic_thresholds = dynamic_thresholds
 
     def before_solve(self, solver, instance, model):
         solution = self.predict(instance)
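For reference, a minimal sketch of how the simplified constructor is called after this change. The import path is an assumption (it does not appear in this diff); classifier, mode and threshold are the three remaining keyword arguments shown above.

    # Assumed import path; not part of this diff.
    from miplearn import PrimalSolutionComponent

    # A single fixed cutoff replaces max_fpr, min_threshold and dynamic_thresholds.
    comp = PrimalSolutionComponent(threshold=0.90)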
@@ -51,75 +46,46 @@ class PrimalSolutionComponent(Component):
         features = VariableFeaturesExtractor().extract(training_instances)
         solutions = SolutionExtractor().extract(training_instances)
 
-        def _fit(args):
-            category, label = args[0], args[1]
+        for category in features.keys():
             x_train = features[category]
-            y_train = solutions[category]
-            y = y_train[:, label].astype(int)
+            for label in [0, 1]:
+                y_train = solutions[category][:, label].astype(int)
 
-            if isinstance(self.classifier_prototype, list):
-                clf = deepcopy(self.classifier_prototype[label])
-            else:
-                clf = deepcopy(self.classifier_prototype)
-            clf.fit(x_train, y)
+                # If all samples are either positive or negative, make constant predictions
+                y_avg = np.average(y_train)
+                if y_avg < 0.001 or y_avg >= 0.999:
+                    self.classifiers[category, label] = round(y_avg)
+                    continue
 
-            y_avg = np.average(y)
-            if (not self.dynamic_thresholds) or y_avg <= 0.001 or y_avg >= 0.999:
-                return {"classifier": clf,
-                        "threshold": self.min_threshold[label]}
-
-            proba = clf.predict_proba(x_train)
-            assert isinstance(proba, np.ndarray), \
-                "classifier should return numpy array"
-            assert proba.shape == (x_train.shape[0], 2), \
-                "classifier should return (%d,%d)-shaped array, not %s" % (
-                    x_train.shape[0], 2, str(proba.shape))
-
-            y_scores = proba[:, 1]
-            fpr, tpr, thresholds = roc_curve(y, y_scores)
-            k = 0
-            while True:
-                if (k + 1) > len(fpr):
-                    break
-                if fpr[k + 1] > self.max_fpr[label]:
-                    break
-                if thresholds[k + 1] < self.min_threshold[label]:
-                    break
-                k = k + 1
-            self.thresholds[category, label] = thresholds[k]
-
-            return {"classifier": clf,
-                    "threshold": thresholds[k]}
-
-        items = [(category, label)
-                 for category in features.keys()
-                 for label in [0, 1]]
-
-        if n_jobs == 1:
-            results = list(map(_fit, tqdm(items, desc="Fit (primal)")))
-        else:
-            results = p_map(_fit, items, num_cpus=n_jobs)
-
-        for (idx, (category, label)) in enumerate(items):
-            self.thresholds[category, label] = results[idx]["threshold"]
-            self.classifiers[category, label] = results[idx]["classifier"]
+                # Create a copy of classifier prototype and train it
+                if isinstance(self.classifier_prototype, list):
+                    clf = deepcopy(self.classifier_prototype[label])
+                else:
+                    clf = deepcopy(self.classifier_prototype)
+                clf.fit(x_train, y_train)
+                self.classifiers[category, label] = clf
 
     def predict(self, instance):
-        x_test = VariableFeaturesExtractor().extract([instance])
         solution = {}
+        x_test = VariableFeaturesExtractor().extract([instance])
         var_split = Extractor.split_variables(instance)
         for category in var_split.keys():
             for (i, (var, index)) in enumerate(var_split[category]):
                 if var not in solution.keys():
                     solution[var] = {}
                 solution[var][index] = None
-                for label in [0, 1]:
-                    if (category, label) not in self.classifiers.keys():
-                        continue
-                    ws = self.classifiers[category, label].predict_proba(x_test[category])
-                    logger.debug("%s[%s] ws=%.6f threshold=%.6f" %
-                                 (var, index, ws[i, 1], self.thresholds[category, label]))
-                    if ws[i, 1] >= self.thresholds[category, label]:
+            for label in [0, 1]:
+                if (category, label) not in self.classifiers.keys():
+                    continue
+                clf = self.classifiers[category, label]
+                if isinstance(clf, float):
+                    ws = np.array([[1-clf, clf]
+                                   for _ in range(len(var_split[category]))])
+                else:
+                    ws = clf.predict_proba(x_test[category])
+                for (i, (var, index)) in enumerate(var_split[category]):
+                    if ws[i, 1] >= self.threshold:
                         solution[var][index] = label
         return solution
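The new predict() applies one fixed cutoff, self.threshold, to the predicted probability of each label, and it also handles the constant classifiers that fit() stores as plain numbers. A standalone illustration of both paths, using plain numpy and made-up probabilities (not MIPLearn data):

    import numpy as np

    threshold = 0.50                      # default cutoff from the new __init__
    solution = {"x": {0: None, 1: None, 2: None}}

    # Path 1: a trained classifier returned these probabilities for label=1.
    ws = np.array([[0.10, 0.90],
                   [0.65, 0.35],
                   [0.40, 0.60]])
    for i in range(ws.shape[0]):
        if ws[i, 1] >= threshold:         # same test as `ws[i, 1] >= self.threshold`
            solution["x"][i] = 1
    print(solution)                       # {'x': {0: 1, 1: None, 2: 1}}

    # Path 2: an all-positive category was stored by fit() as the constant 1.0;
    # predict() expands it into constant probabilities before applying the cutoff.
    clf = 1.0
    ws_const = np.array([[1 - clf, clf] for _ in range(3)])
    print(ws_const)                       # [[0. 1.] [0. 1.] [0. 1.]]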
@@ -38,8 +38,7 @@ def test_evaluate():
                                 [0., 1.],  # x[2] instances[0]
                                 [1., 0.],  # x[3] instances[0]
                             ]))
-    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
-                                   dynamic_thresholds=False)
+    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one])
     comp.fit(instances[:1])
     assert comp.predict(instances[0]) == {"x": {0: 0,
                                                 1: 0,
@@ -97,4 +96,3 @@ def test_primal_parallel_fit():
     comp = PrimalSolutionComponent()
     comp.fit(instances, n_jobs=2)
     assert len(comp.classifiers) == 2
-    assert len(comp.thresholds) == 2
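The test above now only checks that one entry per (category, label) pair ends up in comp.classifiers, since the thresholds dictionary is gone. A standalone sketch of that bookkeeping, mirroring the new fit() loop with scikit-learn classifiers and made-up data (none of this is MIPLearn's own test data):

    import numpy as np
    from copy import deepcopy
    from sklearn.neighbors import KNeighborsClassifier

    features = {"x": np.array([[0.0], [1.0], [2.0], [3.0]])}
    solutions = {"x": np.array([[1, 0], [1, 0], [0, 1], [0, 1]])}  # columns: label 0, label 1
    prototype = KNeighborsClassifier(n_neighbors=1)

    classifiers = {}
    for category in features.keys():
        x_train = features[category]
        for label in [0, 1]:
            y_train = solutions[category][:, label].astype(int)
            y_avg = np.average(y_train)
            if y_avg < 0.001 or y_avg >= 0.999:
                classifiers[category, label] = round(y_avg)  # constant prediction
                continue
            clf = deepcopy(prototype)
            clf.fit(x_train, y_train)
            classifiers[category, label] = clf

    print(sorted(classifiers.keys()))  # [('x', 0), ('x', 1)]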