# demo_darknet2onnx.py — run a pre-converted YOLOv4 ONNX model on a video and report timings.
import argparse
import multiprocessing
import os
import sys
import time

import cv2
import numpy as np
import onnx
import onnxruntime

from tool.utils import *
from tool.darknet2onnx import *
  11. def main():
  12. onnx_path = 'C:\\Users\\VINNO\\Desktop\\新建文件夹 (2)\\pytorch-YOLOv4-master\\20210824_yolov4_1_320_320_static11.onnx'
  13. sess_options = onnxruntime.SessionOptions()
  14. # sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
  15. #
  16. #控制用于运行模型的线程数 controls the number of threads to use to run the model
  17. sess_options.intra_op_num_threads = 1
  18. #
  19. # #When sess_options.execution_mode = rt.ExecutionMode.ORT_PARALLEL,
  20. # # you can set sess_options.inter_op_num_threads to control the number of threads used to parallelize the execution of the graph (across nodes).
  21. # sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_PARALLEL
  22. # sess_options.inter_op_num_threads = 1
  23. session = onnxruntime.InferenceSession(onnx_path, sess_options)
  24. # session = onnxruntime.InferenceSession(onnx_path)
  25. # print("The model expects input shape: ", session.get_inputs()[0].shape)
  26. IN_IMAGE_H = session.get_inputs()[0].shape[2]
  27. IN_IMAGE_W = session.get_inputs()[0].shape[3]
  28. input_name = session.get_inputs()[0].name
  29. print(IN_IMAGE_H)
  30. cap = cv2.VideoCapture("C:\\Users\\VINNO\\Desktop\\2.mp4")
  31. t1 = time.time()
  32. t = 0
  33. forwardtime = 0
  34. alltime = 0
  35. forward_progresstime = 0
  36. while (cap.isOpened()):
  37. ret, frame = cap.read()
  38. if ret == True:
  39. ttt_alltime = time.time()
  40. # Input
  41. resized = cv2.resize(frame, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR)
  42. # img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
  43. # img_in = np.expand_dims(img_in, axis=2)
  44. img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
  45. img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
  46. img_in = np.expand_dims(img_in, axis=0)
  47. img_in /= 255.0
  48. ttt_forwardtime = time.time()
  49. outputs = session.run(None, {input_name: img_in})
  50. temp_forwardtime = (time.time() - ttt_forwardtime) * 1000
  51. forwardtime += temp_forwardtime
  52. boxes = post_processing(img_in, 0.4, 0.6, outputs)
  53. temp_forward_progresstime = (time.time() - ttt_forwardtime) * 1000
  54. forward_progresstime += temp_forward_progresstime
  55. namesfile = 'breast.names'
  56. class_names = load_class_names(namesfile)
  57. dst_img = plot_boxes_cv2(frame, boxes[0], class_names=class_names)
  58. temp_alltime = (time.time() - ttt_alltime) * 1000
  59. alltime += temp_alltime
  60. print(temp_alltime)
  61. t += 1
  62. cv2.imshow('111', frame)
  63. if cv2.waitKey(1) & 0xff == ord('q'):
  64. break
  65. else:
  66. break
  67. tttt = time.time() - t1
  68. print("时间:{}".format(tttt))
  69. print(t)
  70. print("alltime平均时间:{}".format(alltime/t))
  71. print("forwardtime平均时间:{}".format(forwardtime/t))
  72. print("forward_progresstime平均时间:{}".format(forward_progresstime/t))
  73. print('{:.3f}'.format(tttt/t*1000))
  74. cap.release()
  75. cv2.destroyAllWindows()
  76. if __name__ == '__main__':
  77. print("Converting to onnx and running demo ...")
  78. # cfg_file = 'C:\\Users\\VINNO\\Desktop\\新建文件夹 (2)\\pytorch-YOLOv4-master\\yolov4-tiny-breast-256-anchors---20210820.cfg'
  79. # weight_file = 'C:\\Users\\VINNO\\Desktop\\新建文件夹 (2)\\pytorch-YOLOv4-master\\yolov4-tiny-breast-256-anchors---20210820_last.weights'
  80. # batch_size = 1
  81. #
  82. # if batch_size <= 0:
  83. # onnx_path_demo = transform_to_onnx(cfg_file, weight_file, batch_size)
  84. # else:
  85. # # Transform to onnx as specified batch size
  86. # transform_to_onnx(cfg_file, weight_file, batch_size)
  87. # # Transform to onnx as demo
  88. # onnx_path_demo = transform_to_onnx(cfg_file, weight_file, 1)
  89. # session = onnxruntime.InferenceSession(onnx_path_demo)
  90. #
  91. #
  92. # cpuCount = os.cpu_count()
  93. # print("Number of CPUs in the system:", cpuCount)
  94. main()