streamlit_inference.py (5.2 KB)
  1. # Ultralytics YOLO 🚀, AGPL-3.0 license
  2. import io
  3. import time
  4. import cv2
  5. import torch
  6. def inference():
  7. """Runs real-time object detection on video input using Ultralytics YOLOv8 in a Streamlit application."""
  8. # Scope imports for faster ultralytics package load speeds
  9. import streamlit as st
  10. from ultralytics import YOLO
  11. # Hide main menu style
  12. menu_style_cfg = """<style>MainMenu {visibility: hidden;}</style>"""
  13. # Main title of streamlit application
  14. main_title_cfg = """<div><h1 style="color:#FF64DA; text-align:center; font-size:40px;
  15. font-family: 'Archivo', sans-serif; margin-top:-50px;margin-bottom:20px;">
  16. Ultralytics YOLOv8 Streamlit Application
  17. </h1></div>"""
  18. # Subtitle of streamlit application
  19. sub_title_cfg = """<div><h4 style="color:#042AFF; text-align:center;
  20. font-family: 'Archivo', sans-serif; margin-top:-15px; margin-bottom:50px;">
  21. Experience real-time object detection on your webcam with the power of Ultralytics YOLOv8! 🚀</h4>
  22. </div>"""
  23. # Set html page configuration
  24. st.set_page_config(page_title="Ultralytics Streamlit App", layout="wide", initial_sidebar_state="auto")
  25. # Append the custom HTML
  26. st.markdown(menu_style_cfg, unsafe_allow_html=True)
  27. st.markdown(main_title_cfg, unsafe_allow_html=True)
  28. st.markdown(sub_title_cfg, unsafe_allow_html=True)
  29. # Add ultralytics logo in sidebar
  30. with st.sidebar:
  31. logo = "https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg"
  32. st.image(logo, width=250)
  33. # Add elements to vertical setting menu
  34. st.sidebar.title("User Configuration")
  35. # Add video source selection dropdown
  36. source = st.sidebar.selectbox(
  37. "Video",
  38. ("webcam", "video"),
  39. )
  40. vid_file_name = ""
  41. if source == "video":
  42. vid_file = st.sidebar.file_uploader("Upload Video File", type=["mp4", "mov", "avi", "mkv"])
  43. if vid_file is not None:
  44. g = io.BytesIO(vid_file.read()) # BytesIO Object
  45. vid_location = "ultralytics.mp4"
  46. with open(vid_location, "wb") as out: # Open temporary file as bytes
  47. out.write(g.read()) # Read bytes into file
  48. vid_file_name = "ultralytics.mp4"
  49. elif source == "webcam":
  50. vid_file_name = 0
  51. # Add dropdown menu for model selection
  52. yolov8_model = st.sidebar.selectbox(
  53. "Model",
  54. (
  55. "YOLOv8n",
  56. "YOLOv8s",
  57. "YOLOv8m",
  58. "YOLOv8l",
  59. "YOLOv8x",
  60. "YOLOv8n-Seg",
  61. "YOLOv8s-Seg",
  62. "YOLOv8m-Seg",
  63. "YOLOv8l-Seg",
  64. "YOLOv8x-Seg",
  65. "YOLOv8n-Pose",
  66. "YOLOv8s-Pose",
  67. "YOLOv8m-Pose",
  68. "YOLOv8l-Pose",
  69. "YOLOv8x-Pose",
  70. ),
  71. )
  72. model = YOLO(f"{yolov8_model.lower()}.pt") # Load the yolov8 model
  73. class_names = list(model.names.values()) # Convert dictionary to list of class names
  74. # Multiselect box with class names and get indices of selected classes
  75. selected_classes = st.sidebar.multiselect("Classes", class_names, default=class_names[:3])
  76. selected_ind = [class_names.index(option) for option in selected_classes]
  77. if not isinstance(selected_ind, list): # Ensure selected_options is a list
  78. selected_ind = list(selected_ind)
  79. conf_thres = st.sidebar.slider("Confidence Threshold", 0.0, 1.0, 0.25, 0.01)
  80. nms_thres = st.sidebar.slider("NMS Threshold", 0.0, 1.0, 0.45, 0.01)
  81. col1, col2 = st.columns(2)
  82. org_frame = col1.empty()
  83. ann_frame = col2.empty()
  84. fps_display = st.sidebar.empty() # Placeholder for FPS display
  85. if st.sidebar.button("Start"):
  86. videocapture = cv2.VideoCapture(vid_file_name) # Capture the video
  87. if not videocapture.isOpened():
  88. st.error("Could not open webcam.")
  89. stop_button = st.button("Stop") # Button to stop the inference
  90. prev_time = 0
  91. while videocapture.isOpened():
  92. success, frame = videocapture.read()
  93. if not success:
  94. st.warning("Failed to read frame from webcam. Please make sure the webcam is connected properly.")
  95. break
  96. curr_time = time.time()
  97. fps = 1 / (curr_time - prev_time)
  98. prev_time = curr_time
  99. # Store model predictions
  100. results = model(frame, conf=float(conf_thres), iou=float(nms_thres), classes=selected_ind)
  101. annotated_frame = results[0].plot() # Add annotations on frame
  102. # display frame
  103. org_frame.image(frame, channels="BGR")
  104. ann_frame.image(annotated_frame, channels="BGR")
  105. if stop_button:
  106. videocapture.release() # Release the capture
  107. torch.cuda.empty_cache() # Clear CUDA memory
  108. st.stop() # Stop streamlit app
  109. # Display FPS in sidebar
  110. fps_display.metric("FPS", f"{fps:.2f}")
  111. # Release the capture
  112. videocapture.release()
  113. # Clear CUDA memory
  114. torch.cuda.empty_cache()
  115. # Destroy window
  116. cv2.destroyAllWindows()
# Main function call — run the Streamlit app only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    inference()