- """
- 分类:
- [{'Label': 0, 'Confidence': 1.0}]
- []
- """
- import numpy as np
- from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
- # 分类:
- from enum import Enum
- import sys
class MethodAveragePrecision(Enum):
    """
    Interpolation method used when computing average precision:
    every-point interpolation or the 11-point interpolation from PASCAL VOC.
    """
    EveryPointInterpolation = 1
    ElevenPointInterpolation = 2
class Evaluator_classification():
    def __init__(self, iou_thres, method=MethodAveragePrecision.EveryPointInterpolation):
        # note: iou_thres and method are not used by the classification evaluator
        self.wrong_file = {'gt_wrong': [], 'pred_wrong': []}
        self.background_images_all_indexs = []
        self.background_images_results = {}
        self.background_images_results_count = {'background_images_all_nums': 0}
        self.all_image_dict = {'images_all_nums': 0}
        # per-class image indices: ground-truth positives, FPs and TPs
        self.all_no_background_images_pos_results = {}
        self.all_no_background_images_fp_results = {}
        self.all_no_background_images_tp_results = {}
        self.pred_results = []  # [image_index, predicted label]
        self.gt_results = []    # [image_index, ground-truth label]
        self.classes = []       # union of all gt and pred labels seen
    def generate_metrics(self):
        """
        Compute per-class precision/recall from the accumulated results.
        Also fills the per-class pos/TP/FP image-index dicts, so it is
        intended to be called once per evaluation run.
        """
        classes = sorted(self.classes)
        ret = []
        assert len(self.gt_results) == len(self.pred_results)
        for c in classes:
            # collect every prediction of class c, together with the ground
            # truth of the same image
            dects = []
            gts = []
            for i in range(len(self.pred_results)):
                if self.pred_results[i][1] == c:
                    dects.append(self.pred_results[i])
                    gts.append(self.gt_results[i])
            # npos: number of ground-truth instances of class c
            npos = 0
            for g in self.gt_results:
                if g[1] == c:
                    npos += 1
                    self.all_no_background_images_pos_results.setdefault(str(c), []).append(g[0])
            TP = np.zeros(len(dects))
            FP = np.zeros(len(dects))
            for d in range(len(dects)):
                if dects[d][1] == gts[d][1]:
                    TP[d] = 1
                    self.all_no_background_images_tp_results.setdefault(str(c), []).append(dects[d][0])
                else:
                    FP[d] = 1  # prediction of class c on an image of another class
                    self.all_no_background_images_fp_results.setdefault(str(c), []).append(dects[d][0])
            acc_FP = np.cumsum(FP)
            acc_TP = np.cumsum(TP)
            rec = np.zeros(len(dects)) if npos == 0 else acc_TP / npos
            prec = np.divide(acc_TP, acc_FP + acc_TP)
            # add the class result to the list to be returned
            ret.append({
                'class': c,
                'precision': prec,
                'recall': rec,
                'total positives': npos,
                'total TP': np.sum(TP),
                'total FP': np.sum(FP)
            })
        return ret
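    # Worked example of the arithmetic above (assumed toy data, not from a
    # real run): with gt labels [0, 0, 1] and predictions [0, 1, 1],
    # class 0 gets one prediction with TP=[1] and npos=2, so prec=[1.0] and
    # rec=[0.5]; class 1 gets two predictions with TP=[0, 1], FP=[1, 0] and
    # npos=1, so the cumulative prec is [0.0, 0.5] and rec is [0.0, 1.0].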
    def add_batch(self, gt_file, pred_file, image_index):
        """Register one image's ground truth and prediction."""
        if gt_file == []:
            self.wrong_file['gt_wrong'].append(image_index)
        elif pred_file == []:
            self.wrong_file['pred_wrong'].append(image_index)
        else:
            self.all_image_dict['images_all_nums'] += 1
            assert len(gt_file) == 1 and len(pred_file) == 1, \
                'classification expects exactly one label per image'
            each_gt_label = gt_file[0]['Label']
            each_pred_label = pred_file[0]['Label']
            self.gt_results.append([image_index, each_gt_label])
            self.pred_results.append([image_index, each_pred_label])
            if each_gt_label not in self.classes:
                self.classes.append(each_gt_label)
            if each_pred_label not in self.classes:
                self.classes.append(each_pred_label)
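# Minimal usage sketch for the evaluator (assumed data, not a real run):
#
#   ev = Evaluator_classification(iou_thres=0)
#   ev.add_batch([{'Label': 0, 'Confidence': 1.0}],
#                [{'Label': 0, 'Confidence': 1.0}], 'img_0')
#   ev.add_batch([{'Label': 1, 'Confidence': 1.0}],
#                [{'Label': 0, 'Confidence': 1.0}], 'img_1')
#   for m in ev.generate_metrics():
#       print(m['class'], m['total TP'], m['total FP'])
#
# Class 0 ends with one TP ('img_0') and one FP ('img_1'); class 1 has one
# positive and no predictions, so its TP and FP totals are both zero.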
def sklearn_result(all_gt, all_pred, image_index, inferNetLabelCount):
    """Cross-check the hand-rolled metrics against sklearn after dropping
    the entries that came from missing gt or pred files (marked -1)."""
    assert all_gt.shape == all_pred.shape, 'gt and pred counts do not match'
    wrong_gt_index = np.argwhere(all_gt == -1)
    wrong_pred_index = np.argwhere(all_pred == -1)
    print('images with a bad pred file:', image_index[wrong_pred_index].transpose())
    print('images with a bad gt file:', image_index[wrong_gt_index].transpose())
    # merge the bad gt and pred indices and drop duplicates
    wrong_index = np.unique(np.concatenate((wrong_gt_index, wrong_pred_index)))
    all_gt = np.delete(all_gt, wrong_index)
    all_pred = np.delete(all_pred, wrong_index)
    image_index = np.delete(image_index, wrong_index)
    # rows are ground truth, columns are predictions
    confusionMatrix = confusion_matrix(all_gt, all_pred,
                                       labels=list(range(inferNetLabelCount)))
    print(confusionMatrix)
    accuracy = np.diag(confusionMatrix).sum() / confusionMatrix.sum()
    gt_num = np.sum(confusionMatrix, axis=1)   # ground-truth count per class
    pre_num = np.sum(confusionMatrix, axis=0)  # prediction count per class
    tp = np.diag(confusionMatrix)
    fp = pre_num - tp
    precision = np.divide(tp, pre_num)
    recall = np.divide(tp, gt_num)
    print('accuracy: {} (sklearn: {})'.format(accuracy, accuracy_score(all_gt, all_pred)))
    print('precision: {} (sklearn: {})'.format(precision, precision_score(all_gt, all_pred, average=None)))
    print('recall: {} (sklearn: {})'.format(recall, recall_score(all_gt, all_pred, average=None)))
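# Sketch of the confusion-matrix arithmetic above on an assumed 2x2 example:
# with rows = ground truth and columns = predictions,
#
#   cm = [[3, 1],
#         [2, 4]]
#
# the diagonal holds the per-class TP (3, 4), the column sums the prediction
# counts (5, 5) and the row sums the ground-truth counts (4, 6), giving
# precision = [3/5, 4/5] and recall = [3/4, 4/6]; these match sklearn's
# precision_score / recall_score with average=None.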
if __name__ == "__main__":
    import random
    image_num = 100
    all_gt = np.zeros(image_num)
    all_pred = np.zeros(image_num)
    image_index = np.zeros(image_num)
    Count = 3  # labels are drawn from 0..Count inclusive
    evaluator = Evaluator_classification(iou_thres=0)
    for i in range(image_num):
        a1 = random.randint(0, Count)
        b1 = random.randint(0, Count)
        temp = random.randint(0, 10)
        if temp < 2:
            # simulate a missing gt file
            a = []
            b = [{'Label': b1, 'Confidence': 1.0}]
            all_gt[i] = -1
            all_pred[i] = b1
        elif 2 <= temp < 4:
            # simulate a missing pred file
            b = []
            a = [{'Label': a1, 'Confidence': 1.0}]
            all_gt[i] = a1
            all_pred[i] = -1
        else:
            a = [{'Label': a1, 'Confidence': 1.0}]
            b = [{'Label': b1, 'Confidence': 1.0}]
            all_gt[i] = a1
            all_pred[i] = b1
        image_index[i] = i
        evaluator.add_batch(a, b, str(i))
- print("--------------------------------------可用图像report--------------------------------------")
- wrong_file = evaluator.wrong_file
- print("gt file有问题的image:{}".format(wrong_file['gt_wrong']))
- print("pred file有问题的image:{}".format(wrong_file['pred_wrong']))
- all_image_dict = evaluator.all_image_dict
- print('所有可用的图像数量:{}'.format(all_image_dict['images_all_nums']))
- print("--------------------------------------背景图像report--------------------------------------")
- background_images_results_count = evaluator.background_images_results_count
- for key in background_images_results_count.keys():
- print(key + ':' + str(background_images_results_count[key]))
- background_images_results = evaluator.background_images_results
- for key in background_images_results.keys():
- print(key + ':' + str(background_images_results[key]))
- print("--------------------------------------非背景图像report--------------------------------------")
- print('非背景图像数量:{}'.format(all_image_dict['images_all_nums']))
- metricsPerClass = evaluator.generate_metrics()
- for mc in metricsPerClass:
- c = mc['class']
- precision = mc['precision']
- recall = mc['recall']
- total_positives = mc['total positives']
- total_TP = mc['total TP']
- total_FP = mc['total FP']
- precision_all = 0 if (total_TP + total_FP) == 0 else total_TP / (total_TP + total_FP)
- recall_all = 0 if total_positives == 0 else total_TP / total_positives
- # Print AP per class
- print('Label:%s, total_TP: %d, total_FP: %d, total_positives_gt: %d, precision: %f, recall: %f '
- % (c, total_TP, total_FP, total_positives, precision_all, recall_all))
- try:
- average_precision = mc['AP']
- print('Label:%s, mAP: %f, ' % (c, average_precision))
- except:
- continue
- all_no_background_images_fp_results = evaluator.all_no_background_images_fp_results
- for key in all_no_background_images_fp_results.keys():
- each_result = all_no_background_images_fp_results[key]
- print('Label:' + key + ',FP对应的image:' + str(sorted(set(each_result), key=each_result.index)))
- all_no_background_images_pos_results = evaluator.all_no_background_images_pos_results
- all_no_background_images_tp_results = evaluator.all_no_background_images_tp_results
- for key in all_no_background_images_pos_results.keys():
- each_pos_results = all_no_background_images_pos_results[key]
- if key in all_no_background_images_tp_results.keys():
- each_tp_results = all_no_background_images_tp_results[key]
- else:
- each_tp_results = []
- if key in all_no_background_images_fp_results.keys():
- each_fp_results = all_no_background_images_fp_results[key]
- else:
- each_fp_results = []
- each_fn_results = []
- for elem in each_pos_results:
- if elem not in each_tp_results:
- each_fn_results.append(elem)
- print('Label:' + key + ',FN对应的image:' + str(each_fn_results))
    sklearn_result(all_gt, all_pred, image_index, Count + 1)