
"""Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""

from __future__ import annotations

import os
from copy import deepcopy

import numpy as np
import torch

from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
from ultralytics.utils.torch_utils import autocast, profile_ops


def check_train_batch_size(
    model: torch.nn.Module,
    imgsz: int = 640,
    amp: bool = True,
    batch: int | float = -1,
    max_num_obj: int = 1,
) -> int:
    """
    Compute optimal YOLO training batch size using the autobatch() function.

    Args:
        model (torch.nn.Module): YOLO model to check batch size for.
        imgsz (int, optional): Image size used for training.
        amp (bool, optional): Use automatic mixed precision if True.
        batch (int | float, optional): Fraction of GPU memory to use. If -1, use default.
        max_num_obj (int, optional): The maximum number of objects from the dataset.

    Returns:
        (int): Optimal batch size computed using the autobatch() function.
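
    Examples:
        A minimal sketch; assumes a CUDA device and an Ultralytics YOLO checkpoint such as "yolo11n.pt":
        >>> from ultralytics import YOLO
        >>> model = YOLO("yolo11n.pt").model.cuda()
        >>> optimal_batch = check_train_batch_size(model, imgsz=640, amp=True, batch=-1)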

    Notes:
        If 0.0 < batch < 1.0, it's used as the fraction of GPU memory to use.
        Otherwise, a default fraction of 0.6 is used.
    """
    with autocast(enabled=amp):
        return autobatch(
            deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6, max_num_obj=max_num_obj
        )


def autobatch(
    model: torch.nn.Module,
    imgsz: int = 640,
    fraction: float = 0.60,
    batch_size: int = DEFAULT_CFG.batch,
    max_num_obj: int = 1,
) -> int:
    """
    Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.

    Args:
        model (torch.nn.Module): YOLO model to compute batch size for.
        imgsz (int, optional): The image size used as input for the YOLO model.
        fraction (float, optional): The fraction of available CUDA memory to use.
        batch_size (int, optional): The default batch size to use if an error is detected.
        max_num_obj (int, optional): The maximum number of objects from the dataset.

    Returns:
        (int): The optimal batch size.
    """
    # Check device
    prefix = colorstr("AutoBatch: ")
    LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
    device = next(model.parameters()).device  # get model device
    if device.type in {"cpu", "mps"}:
        LOGGER.warning(f"{prefix}intended for CUDA devices, using default batch-size {batch_size}")
        return batch_size
    if torch.backends.cudnn.benchmark:
        LOGGER.warning(f"{prefix}Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = f"CUDA:{os.getenv('CUDA_VISIBLE_DEVICES', '0').strip()[0]}"  # e.g. 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free")

    # Profile memory usage at increasing batch sizes
    batch_sizes = [1, 2, 4, 8, 16] if t < 16 else [1, 2, 4, 8, 16, 32, 64]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile_ops(img, model, n=1, device=device, max_num_obj=max_num_obj)

        # Fit a line through the valid (batch size, memory) measurements
        xy = [
            [x, y[2]]
            for i, (x, y) in enumerate(zip(batch_sizes, results))
            if y  # valid result
            and isinstance(y[2], (int, float))  # is numeric
            and 0 < y[2] < t  # within GPU memory limit
            and (i == 0 or not results[i - 1] or y[2] > results[i - 1][2])  # first item or increasing memory
        ]
        fit_x, fit_y = zip(*xy) if xy else ([], [])
        p = np.polyfit(fit_x, fit_y, deg=1)  # first-degree fit: memory ≈ p[0] * batch + p[1]
        b = int(round((f * fraction - p[1]) / p[0]))  # batch size that hits the target memory fraction
        if None in results:  # some batch sizes failed
            i = results.index(None)  # first fail index
            if b >= batch_sizes[i]:  # intercept is above the failure point
                b = batch_sizes[max(i - 1, 0)]  # select prior safe point
        if b < 1 or b > 1024:  # b outside of safe range
            LOGGER.warning(f"{prefix}batch={b} outside safe range, using default batch-size {batch_size}.")
            b = batch_size

        fraction = (np.polyval(p, b) + r + a) / t  # predicted fraction of total memory that will be used
        LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
        return b
    except Exception as e:
        LOGGER.warning(f"{prefix}error detected: {e},  using default batch-size {batch_size}.")
        return batch_size
    finally:
        torch.cuda.empty_cache()