Make classifier evaluation metrics work when p=0

pull/3/head
Alinson S. Xavier 6 years ago
parent 8039ac7845
commit 5b02c6ea0e

@@ -17,9 +17,13 @@ def classifier_evaluation_dict(tp, tn, fp, fn):
"False negative": fn, "False negative": fn,
"Accuracy": (tp + tn) / (p + n), "Accuracy": (tp + tn) / (p + n),
"F1 score": (2 * tp) / (2 * tp + fp + fn), "F1 score": (2 * tp) / (2 * tp + fp + fn),
"Recall": tp / p,
"Precision": tp / (tp + fp),
} }
if p > 0:
d["Recall"] = tp / p
d["Precision"] = tp / (tp + fp)
else:
d["Recall"] = 1.0
d["Precision"] = 1.0
t = (p + n) / 100.0 t = (p + n) / 100.0
d["Predicted positive (%)"] = d["Predicted positive"] / t d["Predicted positive (%)"] = d["Predicted positive"] / t
d["Predicted negative (%)"] = d["Predicted negative"] / t d["Predicted negative (%)"] = d["Predicted negative"] / t

Loading…
Cancel
Save