From 5b02c6ea0eff4bc40e7a6ee51256821992bd4fd2 Mon Sep 17 00:00:00 2001
From: Alinson S Xavier
Date: Wed, 15 Apr 2020 10:36:47 -0500
Subject: [PATCH] Make classifier evaluation metrics work when p=0

---
 src/python/miplearn/components/__init__.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/python/miplearn/components/__init__.py b/src/python/miplearn/components/__init__.py
index cad2cb9..a3d12fe 100644
--- a/src/python/miplearn/components/__init__.py
+++ b/src/python/miplearn/components/__init__.py
@@ -17,9 +17,13 @@ def classifier_evaluation_dict(tp, tn, fp, fn):
         "False negative": fn,
         "Accuracy": (tp + tn) / (p + n),
         "F1 score": (2 * tp) / (2 * tp + fp + fn),
-        "Recall": tp / p,
-        "Precision": tp / (tp + fp),
     }
+    if p > 0:
+        d["Recall"] = tp / p
+        d["Precision"] = tp / (tp + fp)
+    else:
+        d["Recall"] = 1.0
+        d["Precision"] = 1.0
     t = (p + n) / 100.0
     d["Predicted positive (%)"] = d["Predicted positive"] / t
     d["Predicted negative (%)"] = d["Predicted negative"] / t