From 907c4fe14a88f9cb52a277b3e7e96eadbe378444 Mon Sep 17 00:00:00 2001
From: Alinson S Xavier
Date: Wed, 15 Apr 2020 10:38:58 -0500
Subject: [PATCH] Make classifier evaluation metrics work when tp+fp=0

---
 src/python/miplearn/components/__init__.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/python/miplearn/components/__init__.py b/src/python/miplearn/components/__init__.py
index a3d12fe..8e1c524 100644
--- a/src/python/miplearn/components/__init__.py
+++ b/src/python/miplearn/components/__init__.py
@@ -18,12 +18,18 @@ def classifier_evaluation_dict(tp, tn, fp, fn):
         "Accuracy": (tp + tn) / (p + n),
         "F1 score": (2 * tp) / (2 * tp + fp + fn),
     }
+
     if p > 0:
         d["Recall"] = tp / p
-        d["Precision"] = tp / (tp + fp)
     else:
         d["Recall"] = 1.0
+
+    if tp + fp > 0:
+        d["Precision"] = tp / (tp + fp)
+    else:
         d["Precision"] = 1.0
+
+
     t = (p + n) / 100.0
     d["Predicted positive (%)"] = d["Predicted positive"] / t
     d["Predicted negative (%)"] = d["Predicted negative"] / t