
from typing import cast, Dict, List, Optional, Tuple, TYPE_CHECKING, Union

import torch
from torch import Tensor

from .optimizer import (
    _disable_dynamo_if_unsupported,
    _get_scalar_dtype,
    _maximize_doc,
    Optimizer,
    ParamsT,
    TensorListList,
)
mZmZmZmZmZmZ ddgZ G d de      Zd	d
e z   dz   e_        dee	   dee	   deee	      deee	      deee	      dee	   dee	   dee	   dedee	ef   dededee   dededef dZdedeeeej.                     eej0                     ef   eeee	         f   fdZdee	   dee	   deee	      deee	      deee	      dee	   dee	   dee	   dedee	ef   dededee   dededef d Z ee!      	 	 	 	 d$dee	   dee	   deee	      deee	      deee	      dee	   d"ee   dee	   dee	   dededeee	f   dededededef"d#       Zy)%    )castDictListOptionalTupleTYPE_CHECKINGUnionN)Tensor   )_disable_dynamo_if_unsupported_get_scalar_dtype_maximize_doc	OptimizerParamsTTensorListList	Adafactor	adafactorc                        e Zd Z	 	 	 	 	 dddddedeeef   dedeee   ef   ded	ed
ee	   de	f fdZ
 fdZd Z ej                         dd       Z xZS )r   NF)foreachmaximizeparamslrbeta2_decayepsdweight_decayr   r   c          	         t        |t              r|j                         dk7  rt        d      d|k  st        d|       d|k\  st        d|       |d   d|d   k  st        d|d          d|d   k  st        d|d          d	|k  st        d
|       d|k  st        d|       t	        |||||||      }	t
        
|   ||	       y )Nr   zTensor lr must be 1-element        z%Learning rate should be >= 0 but is: z#beta2_decay should be <= 0 but is: r   z epsilon1 should be >= 0 but is: z epsilon2 should be >= 0 but is:       ?z,Clipping threshold d should be >= 1 but is: z$weight_decay should be >= 0 but is: )r   r   r   r   r   r   r   )
isinstancer
   numel
ValueErrordictsuper__init__)selfr   r   r   r   r   r   r   r   defaults	__class__s             ^/home/mcse/projects/flask_80/flask-venv/lib/python3.12/site-packages/torch/optim/_adafactor.pyr%   zAdafactor.__init__   s    b&!bhhjAo:;;byDRDIJJk!B;-PQQq6cSVm?AxHIIc!f}?AxHIIaxKA3OPPl"CL>RSS#%
 	*    c                 f   t         |   |       | j                  D ]  }|j                  dd        |d   D ]v  }| j                  j                  |g       }t        |      dk7  s.t        j                  |d         rGt        |d         }t        j                  |t                     |d<   x  y )Nr   r   r   stepdtype)r$   __setstate__param_groups
setdefaultstategetlentorch	is_tensorfloattensorr   )r&   r2   grouppp_statestep_valr(   s         r)   r/   zAdafactor.__setstate__;   s    U#&& 	XEY-8_ X**..B/w<1$U__WV_-M$WV_5H&+ll8CTCV&WGFO	X	Xr*   c                 $   |d   D ]  }|j                   t        j                  |      rt        d      |j                   j                  rt        d      |j                  |       |j                  |j                          | j                  |   }	t        |	      dk(  rt        j                  dt                     |	d<   |j                   j                         dkD  rt        |j                   j                        }
d|
d	<   |j                   j                  |
      |	d
<   t        |j                   j                        }d|d<   |j                   j                  |      |	d<   n2t        j                  |j                   t        j                        |	d<   |j                  |	j!                  d
d              |j                  |	j!                  dd              |j                  |	j!                  dd              |j                  |	d          
 y)Nr   z-Adafactor does not support complex parametersz+Adafactor does not support sparse gradientsr   r   r-   r,   r   row_varcol_var)memory_formatvarianceF)gradr5   
is_complexRuntimeError	is_sparseappendr2   r4   r8   r   dimlistshape	new_zeros
zeros_likepreserve_formatr3   )r&   r9   params_with_gradgradsrow_varscol_vars	variancesstate_stepsr:   r2   	row_shape	col_shapes               r)   _init_groupzAdafactor._init_groupE   s    x %	.Avv~""#RSSvv"#PQQ##A&LL JJqME 5zQ !&S8I8K Lf66::<!# $QVV\\ 2I$%IbM'(vv'7'7	'BE)$ $QVV\\ 2I$%IbM'(vv'7'7	'BE)$(-(8(8e.C.C)E*% OOEIIi67OOEIIi67UYYz489uV}-K%	.L r*   c                    | j                          d}|$t        j                         5   |       }ddd       | j                  D ]q  }g }g }g }g }g }g }	|d   \  }
}| j	                  |||||||	      }t        ||||||	|d   |d   |d   |d   |
||d   |d   t        | d	d      t        | d
d      |       s |S # 1 sw Y   xY w)zPerform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr   r   r   r   r   r   r   
grad_scale	found_inf)r   r   r   r   eps1eps2r   r   rY   rZ   has_complex) _cuda_graph_capture_health_checkr5   enable_gradr0   rW   r   getattr)r&   closurelossr9   rO   rP   rQ   rR   rS   rT   r[   r\   r]   s                r)   r,   zAdafactor.stepw   s    	--/""$ !y! && %	E-/"$E/1H/1H02I(*KuJD$** K  *;!-0">2i(z*"4t<!$T:'#'%	N U! !s   B;;C)g{Gz?g)NgMbP?r   r   N)__name__
__module____qualname__r   r	   r7   r
   r   r   boolr%   r/   rW   r5   no_gradr,   __classcell__)r(   s   @r)   r   r      s     $(!-9!#+ #'#+#+ %- #+ 	#+
 8E?E)*#+ #+ #+ $#+ #+JX0d U]]_5 5r*   a  Implements Adafactor algorithm.

    .. math::
        \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \tau
                \text{(}\beta_2\text{ decay)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},    \\
            &\hspace{15mm}      \: \epsilon_1, \epsilon_2 \text{ (epsilons)}, \: d \text{(clipping threshold)}, \\
            &\hspace{15mm}      \: \lambda \text{(weight decay)},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : \: R_0 \leftarrow 0 \text{ (second moment row factor)},       \\
            &\hspace{23mm} \: C_0 \leftarrow 0 \text{ (second moment col factor)},               \\
            &\hspace{23mm} \: \widehat{V}_0 \leftarrow 0 \text{ (second moment for vectors)}     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}G_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}G_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\widehat{\beta}_{2_t} \leftarrow 1 - t^{\tau}                           \\
            &\hspace{5mm}\rho_t         \leftarrow min(lr, \frac{1}{\sqrt{t}})                   \\
            &\hspace{5mm}\alpha_t       \leftarrow max(\epsilon_2,
                \text{RMS}(\theta_{t-1}))\rho_t                                                  \\
            &\hspace{5mm}\theta_t       \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}    \\
            &\hspace{5mm}\textbf{if} \: \text{dim}(G_t) > 1:                                     \\
            &\hspace{10mm}R_t           \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
                (1-\widehat{\beta}_{2_t})(G_t \odot G_t) \cdot 1_m                               \\
            &\hspace{10mm}C_t           \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
                (1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t)                         \\
            &\hspace{10mm}\widehat{V}_t \leftarrow
                \frac{R_t \cdot C_t}{max(1^\top_n \cdot R_t, \epsilon_1)}                        \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\widehat{V}_t \leftarrow \widehat{\beta}_{2_t}\widehat{V}_{t-1}+
                (1-\widehat{\beta}_{2_t}) \cdot (G_t \odot G_t)                                  \\
            &\hspace{5mm}U_t            \leftarrow
                \frac{G_t}{max(\sqrt{\widehat{V}_t}, \epsilon_1)}                                \\
            &\hspace{5mm}\widehat{U}_t  \leftarrow \frac{U_t}{max(1, \frac{\text{RMS}(U_t)}{d})} \\
            &\hspace{5mm}\theta_t       \leftarrow \theta_{t-1} - \alpha_t \widehat{U}_t         \\

            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
        \end{aligned}

    For further details regarding the algorithm we refer to `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`_.
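
    As a rough numerical illustration of the step-size terms above: at step
    :math:`t = 100` with ``lr = 0.01`` and :math:`\tau = -0.8`, the relative step
    size is :math:`\rho_t = min(0.01, 1/\sqrt{100}) = 0.01` and
    :math:`1 - \widehat{\beta}_{2_t} = t^{\tau} = 100^{-0.8} \approx 0.025`, so only
    about 2.5% of the new squared-gradient statistics are mixed into the second
    moment estimates at that step.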
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): unlike other optimizers, Adafactor does not require a
            learning rate, and Shazeer, Noam, and Mitchell Stern do not use lr at all.
            Deviating from the paper, this implementation uses lr for applying weight
            decay and as the maximum value for relative step size rho_t. Note that in
            the paper, a constant of 0.01 is used as the maximum value for relative
            step size, and so we set 0.01 as the default value. (default: 1e-2)
        beta2_decay (float, optional): the decay rate of beta2. beta2 standardly refers
            to the coefficient used for computing the running average of the gradient
            squared. (default: -0.8)
        eps (Tuple[float, float], optional): epsilon1 is the term added to the denominator
            of the update calculation to improve numerical stability. This use of epsilon1
            deviates from the algorithm written in the paper! See note below for more details.
            epsilon2 is the term used to avoid having too small a weight update when applying
            parameter scaling. (default: (None, 1e-3))
        d (float, optional): the clipping threshold, used to avoid larger-than-desired
            updates.
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        foreach (bool, optional): whether foreach implementation of optimizer is used. Note
            that the foreach implementation uses ~ sizeof(params) more peak memory than the
            for-loop version due to the intermediates being a tensorlist vs just one tensor.
            As Adafactor is commonly used when memory is prohibitive, Adafactor will default
            to the slower single tensor for-loop implementation unless this flag is explicitly
            True. This behavior is contrary to other optimizers, which will attempt defaulting
            to foreach on CUDA for faster runtime. (default: None)
        {_maximize_doc}
    """
    + r"""
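    Example (an illustrative sketch; ``model``, ``input``, ``target``, and ``loss_fn``
    stand in for a user-defined module, data, and loss)::

        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.Adafactor(model.parameters(), lr=1e-2)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
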
    .. Note::
        The implementation of Adafactor subtly differs from Shazeer, Noam, and Mitchell Stern
        and implementations in some other frameworks with its use of learning rate and
        :math:`\epsilon_1`.

        Regarding the learning rate hyperparameter: Shazeer, Noam, and Mitchell Stern do not
        use lr at all, as the stated algorithm uses :math:`\rho_t` and update clipping to
        affect the step size.

        This implementation allows `lr` to influence the maximum value for :math:`\rho_t`:

        .. math::
            \begin{aligned}
                &\hspace{5mm}\rho_t \leftarrow min(lr, \frac{1}{\sqrt{t}})
            \end{aligned}
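
        A minimal sketch of that rule with plain Python floats (``step`` is the
        current step count; this is the idea, not the exact implementation)::

            rho_t = min(lr, 1.0 / (step ** 0.5))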

        This differs from Shazeer, Noam, and Mitchell Stern, who use a constant of 0.01 as
        the maximum value of :math:`\rho_t`

        .. math::
            \begin{aligned}
                &\hspace{5mm}\rho_t \leftarrow min(0.01, \frac{1}{\sqrt{t}})
            \end{aligned}

        Shazeer, Noam, and Mitchell Stern do not enforce an opinion on how weight decay should
        be computed, and so we use the learning rate as a coefficient for decoupled weight
        decay, similar to what is suggested in `Decoupled Weight Decay Regularization`_.
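
        Concretely, the decoupled weight decay used here amounts to shrinking the
        parameter in place before the Adafactor update is applied (a sketch of the
        idea, not the full code path)::

            param.mul_(1 - lr * weight_decay)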

        Regarding the use of :math:`\epsilon_1`: The implementation attempts to replicate the
        presumed intention of Shazeer, Noam, and Mitchell Stern to use :math:`\epsilon_1` as
        a stabilizing term when the squared gradient becomes small.

        This stabilization can be written as

        .. math::
            \begin{aligned}
                &\hspace{5mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
                    (1-\widehat{\beta}_{2_t})(G_t \odot G_t + 1_n \cdot 1^\top_m) \cdot 1_m          \\
                &\hspace{5mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
                    (1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t + 1_n \cdot 1^\top_m)    \\
                &\hspace{5mm}\widehat{V}_t \leftarrow
                    \frac{R_t \cdot C_t}{max(1^\top_n \cdot R_t, \epsilon_1)}                        \\
                &\hspace{5mm}U_t \leftarrow \frac{G_t}{max(\sqrt{\widehat{V}_t}, \epsilon_1)}        \\
            \end{aligned}

        where the row and column factors of gradient squared :math:`R_t` and :math:`C_t`
        are left alone, and we apply :math:`\epsilon_1` at the final calculation of
        the variance estimate :math:`\widehat{V}_t` and for the update :math:`U_t`.
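
        In code terms this looks roughly as follows for a matrix-shaped gradient,
        where ``row_var`` and ``col_var`` are the factored second-moment buffers
        (a sketch of the idea, not the full code path)::

            var_estimate = row_var @ col_var
            var_estimate.div_(row_var.mean(dim=-2, keepdim=True).clamp_(min=eps1))
            update = var_estimate.clamp_(min=eps1 * eps1).rsqrt_().mul_(grad)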

        This is in contrast to Shazeer, Noam, and Mitchell Stern and other frameworks which
        apply :math:`\epsilon_1` to both row and column factors of the squared gradient, but
        not in the calculations after:

        .. math::
            \begin{aligned}
                &\hspace{5mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
                            (1-\widehat{\beta}_{2_t})(G_t \odot G_t + \epsilon_1 1_n \cdot 1^\top_m) \cdot 1_m          \\
                &\hspace{5mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
                            (1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t + \epsilon_1 1_n \cdot 1^\top_m)    \\
                &\hspace{5mm}\widehat{V}_t \leftarrow \frac{R_t \cdot C_t}{1^\top_n \cdot R_t}                          \\
                &\hspace{5mm}U_t \leftarrow \frac{G_t}{\sqrt{\widehat{V}_t}}                                            \\
            \end{aligned}


    .. _Adafactor\: Adaptive Learning Rates with Sublinear Memory Cost:
        https://arxiv.org/pdf/1804.04235
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    """
)


def _single_tensor_adafactor(
    params: List[Tensor],
    grads: List[Tensor],
    row_vars: List[Optional[Tensor]],
    col_vars: List[Optional[Tensor]],
    variances: List[Optional[Tensor]],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    d: float,
    lr: Union[Tensor, float],
    beta2_decay: float,
    weight_decay: float,
    eps1: Optional[float],
    eps2: float,
    maximize: bool,
    has_complex: bool,
):
    assert (
        grad_scale is None and found_inf is None
    ), "Grad scaling should occur outside of optimizer.step()"

    if torch.jit.is_scripting():
        # TorchScript cannot handle the float/Tensor overloads below, so require a float lr.
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        step_t = state_steps[i]
        row_var = row_vars[i]
        col_var = col_vars[i]
        variance = variances[i]
        if eps1 is None:
            eps1 = torch.finfo(param.dtype).eps

        # update step
        step_t += 1
        step_float = step_t.item()

        one_minus_beta2_t = step_float**beta2_decay
        rho_t = min(lr, 1 / (step_float**0.5))
        alpha = max(eps2, param.norm(2).item() / (param.numel() ** 0.5)) * rho_t

        # decoupled weight decay, scaled by lr
        if weight_decay != 0:
            param.mul_(1 - lr * weight_decay)

        if grad.dim() > 1:
            assert (
                row_var is not None and col_var is not None
            ), "row_var and col_var should be defined when grad is multidimensional"
            # same as (g * g).mean(dim=-1) w/o materializing an intermediate of size g
            row_mean = (
                torch.norm(grad, dim=-1, keepdim=True).square_().div_(grad.size(-1))
            )
            row_var.lerp_(row_mean, one_minus_beta2_t)
            # same as (g * g).mean(dim=-2) w/o materializing an intermediate of size g
            col_mean = (
                torch.norm(grad, dim=-2, keepdim=True).square_().div_(grad.size(-2))
            )
            col_var.lerp_(col_mean, one_minus_beta2_t)
            var_estimate = row_var @ col_var
            var_estimate.div_(row_var.mean(dim=-2, keepdim=True).clamp_(min=eps1))
        else:
            assert (
                variance is not None
            ), "variance should be defined when grad is a vector"
            grad_squared = grad * grad
            variance.lerp_(grad_squared, one_minus_beta2_t)
            # avoid writing into variance during the update below
            var_estimate = variance.clone()

        # square eps1 since we take the reciprocal square root afterwards
        update = var_estimate.clamp_(min=eps1 * eps1).rsqrt_()
        update.mul_(grad)
        denom = max(1.0, update.norm(2).item() / ((update.numel() ** 0.5) * d))
        param.add_(update, alpha=-alpha / denom)


def _group_tensors_by_device_dtype_and_is_multidim(
    tensorlists: TensorListList,
) -> Dict[
    Tuple[Optional[torch.device], Optional[torch.dtype], bool],
    List[List[Optional[Tensor]]],
]:
    """Groups tensors by device, dtype, AND multidimensionality -- whether the tensor
    has multiple dims or just one dim (is a vector). This allows the foreach impl of
    Adafactor to assume that every group of params will either be factored or not."""
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(tensorlists)
    ultra_grouped_tensors: Dict[
        Tuple[Optional[torch.device], Optional[torch.dtype], bool],
        List[List[Optional[Tensor]]],
    ] = {}
    for (device, dtype), (tensorlists, _) in grouped_tensors.items():
        matrix_key = (device, dtype, True)
        vector_key = (device, dtype, False)

        # assumes grad is the second tensorlist
        for j, tensor in enumerate(tensorlists[1]):
            assert tensor is not None, "grad should not be None"
            if tensor.dim() > 1:
                if matrix_key not in ultra_grouped_tensors:
                    ultra_grouped_tensors[matrix_key] = [[] for _ in tensorlists]
                for i in range(len(tensorlists)):
                    ultra_grouped_tensors[matrix_key][i].append(tensorlists[i][j])
            else:
                if vector_key not in ultra_grouped_tensors:
                    ultra_grouped_tensors[vector_key] = [[] for _ in tensorlists]
                for i in range(len(tensorlists)):
                    ultra_grouped_tensors[vector_key][i].append(tensorlists[i][j])
    return ultra_grouped_tensors


def _multi_tensor_adafactor(
    params: List[Tensor],
    grads: List[Tensor],
    row_vars: List[Optional[Tensor]],
    col_vars: List[Optional[Tensor]],
    variances: List[Optional[Tensor]],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    d: float,
    lr: Union[Tensor, float],
    beta2_decay: float,
    weight_decay: float,
    eps1: Optional[float],
    eps2: float,
    maximize: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert (
        grad_scale is None and found_inf is None
    ), "Grad scaling should occur outside of optimizer.step()"

    grouped_tensors = _group_tensors_by_device_dtype_and_is_multidim(
        [params, grads, row_vars, col_vars, variances, state_steps]
    )
    for (_, dtype, is_multidim), (
        device_params_,
        device_grads_,
        device_row_vars_,
        device_col_vars_,
        device_variances_,
        device_state_steps_,
    ) in grouped_tensors.items():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_state_steps = cast(List[Tensor], device_state_steps_)
        if eps1 is None:
            assert (
                dtype is not None
            ), "dtype is needed to compute eps1 when eps1 is unset"
            eps1 = torch.finfo(dtype).eps

        if TYPE_CHECKING:
            assert device_state_steps[0] is not None

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps. If the steps live on CPU, foreach falls back to a slow
        # per-tensor loop, so wrap the increment in a Tensor once up front.
        if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1.0)

        one_minus_beta2_ts = []
        beta2_ts = []
        rho_ts = []
        for s in device_state_steps:
            one_minus_beta2_ts.append(s.item() ** beta2_decay)
            beta2_ts.append(1 - s.item() ** beta2_decay)
            rho_ts.append(min(lr, 1 / (s.item() ** 0.5)))

        alphas = [
            max(eps2, p.norm(2).item() / (p.numel() ** 0.5)) * r
            for p, r in zip(device_params, rho_ts)
        ]

        # decoupled weight decay, scaled by lr
        if weight_decay != 0:
            torch._foreach_mul_(device_params, 1 - lr * weight_decay)

        if is_multidim:
            device_row_vars = cast(List[Tensor], device_row_vars_)
            device_col_vars = cast(List[Tensor], device_col_vars_)
            assert (
                device_row_vars[0] is not None and device_col_vars[0] is not None
            ), "row_var and col_var should be defined when grad is multidimensional"
            # same as (g * g).mean(dim=-1) w/o materializing an intermediate of size g
            row_means = [
                torch.norm(grad, dim=-1, keepdim=True) for grad in device_grads
            ]
            torch._foreach_mul_(row_means, row_means)
            torch._foreach_div_(row_means, [grad.size(-1) for grad in device_grads])
            torch._foreach_mul_(device_row_vars, beta2_ts)
            torch._foreach_mul_(row_means, one_minus_beta2_ts)
            torch._foreach_add_(device_row_vars, row_means)
            del row_means

            # same as (g * g).mean(dim=-2) w/o materializing an intermediate of size g
            col_means = [
                torch.norm(grad, dim=-2, keepdim=True) for grad in device_grads
            ]
            torch._foreach_mul_(col_means, col_means)
            torch._foreach_div_(col_means, [grad.size(-2) for grad in device_grads])
            torch._foreach_mul_(device_col_vars, beta2_ts)
            torch._foreach_mul_(col_means, one_minus_beta2_ts)
            torch._foreach_add_(device_col_vars, col_means)
            del col_means

            var_estimates = [
                row_var @ col_var
                for row_var, col_var in zip(device_row_vars, device_col_vars)
            ]
            row_var_means = [
                row_var.mean(dim=-2, keepdim=True) for row_var in device_row_vars
            ]
            torch._foreach_clamp_min_(row_var_means, eps1)
            torch._foreach_div_(var_estimates, row_var_means)
            del row_var_means
        else:
            device_variances = cast(List[Tensor], device_variances_)
            assert (
                device_variances[0] is not None
            ), "variance should be defined when grad is a vector"

            grads_squared = torch._foreach_mul(device_grads, device_grads)
            torch._foreach_mul_(device_variances, beta2_ts)
            torch._foreach_mul_(grads_squared, one_minus_beta2_ts)
            torch._foreach_add_(device_variances, grads_squared)
            del grads_squared

            # avoid writing into variance during the update below
            var_estimates = [v.clone() for v in device_variances]

        # square eps1 since we take sqrt followed by reciprocal afterwards
        torch._foreach_clamp_min_(var_estimates, eps1 * eps1)
        torch._foreach_sqrt_(var_estimates)
        torch._foreach_reciprocal_(var_estimates)
        torch._foreach_mul_(var_estimates, device_grads)
        updates = var_estimates

        alphas = [
            -a / max(1.0, update.norm(2).item() / ((update.numel() ** 0.5) * d))
            for a, update in zip(alphas, updates)
        ]
        torch._foreach_mul_(updates, alphas)
        torch._foreach_add_(device_params, updates)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adafactor)
def adafactor(
    params: List[Tensor],
    grads: List[Tensor],
    row_vars: List[Optional[Tensor]],
    col_vars: List[Optional[Tensor]],
    variances: List[Optional[Tensor]],
    state_steps: List[Tensor],
    # these are positional-or-keyword with defaults because keyword-only args with
    # defaults are not supported by torchscript-compiled functions
    foreach: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    d: float,
    lr: Union[float, Tensor],
    beta2_decay: float,
    weight_decay: float,
    eps1: float,
    eps2: float,
    maximize: bool,
):
    r"""Functional API that performs Adafactor algorithm computation.

    See :class:`~torch.optim.Adafactor` for details.
    """
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "`state_steps` argument must contain a list of singleton tensors"
        )

    if foreach:
        func = _multi_tensor_adafactor
    else:
        func = _single_tensor_adafactor

    func(
        params,
        grads,
        row_vars,
        col_vars,
        variances,
        state_steps,
        d=d,
        lr=lr,
        beta2_decay=beta2_decay,
        weight_decay=weight_decay,
        eps1=eps1,
        eps2=eps2,
        maximize=maximize,
        grad_scale=grad_scale,
        found_inf=found_inf,
        has_complex=has_complex,
    )