# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""

from copy import deepcopy

import numpy as np
import torch

from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
from ultralytics.utils.torch_utils import profile
  8. def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
  9. """
  10. Compute optimal YOLO training batch size using the autobatch() function.
  11. Args:
  12. model (torch.nn.Module): YOLO model to check batch size for.
  13. imgsz (int): Image size used for training.
  14. amp (bool): If True, use automatic mixed precision (AMP) for training.
  15. Returns:
  16. (int): Optimal batch size computed using the autobatch() function.
  17. """
  18. with torch.cuda.amp.autocast(amp):
  19. return autobatch(deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6)
  20. def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
  21. """
  22. Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.
  23. Args:
  24. model (torch.nn.module): YOLO model to compute batch size for.
  25. imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640.
  26. fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.60.
  27. batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16.
  28. Returns:
  29. (int): The optimal batch size.
  30. """
  31. # Check device
  32. prefix = colorstr("AutoBatch: ")
  33. LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
  34. device = next(model.parameters()).device # get model device
  35. if device.type in {"cpu", "mps"}:
  36. LOGGER.info(f"{prefix} ⚠️ intended for CUDA devices, using default batch-size {batch_size}")
  37. return batch_size
  38. if torch.backends.cudnn.benchmark:
  39. LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
  40. return batch_size
  41. # Inspect CUDA memory
  42. gb = 1 << 30 # bytes to GiB (1024 ** 3)
  43. d = str(device).upper() # 'CUDA:0'
  44. properties = torch.cuda.get_device_properties(device) # device properties
  45. t = properties.total_memory / gb # GiB total
  46. r = torch.cuda.memory_reserved(device) / gb # GiB reserved
  47. a = torch.cuda.memory_allocated(device) / gb # GiB allocated
  48. f = t - (r + a) # GiB free
  49. LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free")
  50. # Profile batch sizes
  51. batch_sizes = [1, 2, 4, 8, 16]
  52. try:
  53. img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
  54. results = profile(img, model, n=3, device=device)
  55. # Fit a solution
  56. y = [x[2] for x in results if x] # memory [2]
  57. p = np.polyfit(batch_sizes[: len(y)], y, deg=1) # first degree polynomial fit
  58. b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size)
  59. if None in results: # some sizes failed
  60. i = results.index(None) # first fail index
  61. if b >= batch_sizes[i]: # y intercept above failure point
  62. b = batch_sizes[max(i - 1, 0)] # select prior safe point
  63. if b < 1 or b > 1024: # b outside of safe range
  64. b = batch_size
  65. LOGGER.info(f"{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.")
  66. fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
  67. LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
  68. return b
  69. except Exception as e:
  70. LOGGER.warning(f"{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.")
  71. return batch_size