
from typing import List, Tuple, Union

import torch
from torch import Tensor

from . import _functional as F
from .optimizer import _maximize_doc, Optimizer, ParamsT

__all__ = ["SparseAdam"]


class SparseAdam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        maximize: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 < lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")

        defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize)
        super().__init__(params, defaults)

        # SparseAdam only supports dense-layout, real-valued parameters; collect
        # offending parameters so the error can point at them by (group, param) index.
        sparse_params = []
        complex_params = []
        for index, param_group in enumerate(self.param_groups):
            assert isinstance(
                param_group, dict
            ), f"param_groups must be a list of dicts, but got {type(param_group)}"
            for d_index, d_param in enumerate(param_group["params"]):
                if d_param.is_sparse:
                    sparse_params.append([index, d_index])
                if d_param.is_complex():
                    complex_params.append([index, d_index])
        if sparse_params:
            raise ValueError(
                f"Sparse params at indices {sparse_params}: "
                "SparseAdam requires dense parameter tensors"
            )
        if complex_params:
            raise ValueError(
                f"Complex params at indices {complex_params}: "
                "SparseAdam does not support complex parameters"
            )

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            state_steps: List[int] = []
            beta1, beta2 = group["betas"]
            maximize = group.get("maximize", False)

            for p in group["params"]:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if not p.grad.is_sparse:
                    raise RuntimeError(
                        "SparseAdam does not support dense gradients, "
                        "please consider Adam instead"
                    )
                grads.append(p.grad)

                state = self.state[p]

                # Lazy state initialization: step counter plus first and second
                # moment estimates, kept in dense layout.
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])

                # Record the step count after incrementing it.
                state["step"] += 1
                state_steps.append(state["step"])

            F.sparse_adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                eps=group["eps"],
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                maximize=maximize,
            )

        return loss


SparseAdam.__doc__ = rf"""SparseAdam implements a masked version of the Adam algorithm
    suitable for sparse gradients. Currently, due to implementation constraints (explained
    below), SparseAdam is only intended for a narrow subset of use cases, specifically
    parameters of a dense layout with gradients of a sparse layout. This occurs in a
    special case where the module backwards produces grads already in a sparse layout.
    One example NN module that behaves as such is ``nn.Embedding(sparse=True)``.

    SparseAdam approximates the Adam algorithm by masking out the parameter and moment
    updates corresponding to the zero values in the gradients. Whereas the Adam algorithm
    will update the first moment, the second moment, and the parameters based on all values
    of the gradients, SparseAdam only updates the moments and parameters corresponding
    to the non-zero values of the gradients.

    A simplified way of thinking about the `intended` implementation is as follows (a short
    numeric sketch appears right after this list):

    1. Create a mask of the non-zero values in the sparse gradients. For example,
       if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1].
    2. Apply this mask over the running moments and do computation on only the
       non-zero values.
    3. Apply this mask over the parameters and only apply an update on non-zero values.
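
    As a rough, illustrative sketch of steps 1-3 on a single parameter tensor (bias
    correction and the actual sparse-layout kernels are omitted, and the gradient
    values are made up)::

        import torch

        beta1, beta2, lr, eps = 0.9, 0.999, 1e-3, 1e-8
        param = torch.ones(5)
        exp_avg = torch.zeros(5)
        exp_avg_sq = torch.zeros(5)

        grad = torch.tensor([0., 5., 0., 0., 9.]).to_sparse().coalesce()
        idx, vals = grad.indices()[0], grad.values()   # the "mask" and its values

        # update the moments and the parameter only at the masked (materialized) positions
        exp_avg[idx] = beta1 * exp_avg[idx] + (1 - beta1) * vals
        exp_avg_sq[idx] = beta2 * exp_avg_sq[idx] + (1 - beta2) * vals * vals
        param[idx] -= lr * exp_avg[idx] / (exp_avg_sq[idx].sqrt() + eps)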

    In actuality, we use sparse layout Tensors to optimize this approximation, which means that
    the more gradient values are masked (by never being materialized), the more performant the
    optimization.
    Since we rely on using sparse layout tensors, we infer that any materialized value in the
    sparse layout is non-zero and we do NOT actually verify that all values are not zero!
    It is important to not conflate a semantically sparse tensor (a tensor where many
    of its values are zeros) with a sparse layout tensor (a tensor where ``.is_sparse``
    returns ``True``). The SparseAdam approximation is intended for `semantically` sparse
    tensors and the sparse layout is only an implementation detail. A clearer implementation
    would be to use MaskedTensors, but those are experimental.
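
    For example, a mostly-zero tensor is only `semantically` sparse until it is explicitly
    converted to a sparse layout::

        >>> t = torch.tensor([0., 5., 0., 0., 9.])   # semantically sparse, dense layout
        >>> t.is_sparse
        False
        >>> t.to_sparse().is_sparse                  # same values, sparse layout
        True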


    .. note::

        If you suspect your gradients are semantically sparse (but do not have sparse
        layout), this variant may not be the best for you. Ideally, you want to avoid
        materializing anything that is suspected to be sparse in the first place, since
        needing to convert all your grads from dense layout to sparse layout may outweigh
        the performance gain. Here, using Adam may be the best alternative, unless you
        can easily rig up your module to output sparse grads similar to
        ``nn.Embedding(sparse=True)``. If you insist on converting your grads, you can do
        so by manually overriding your parameters' ``.grad`` fields with their sparse
        equivalents before calling ``.step()``.
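
        A hedged sketch of that conversion (``model`` and ``optimizer`` here are
        placeholders for your own objects)::

            for p in model.parameters():
                if p.grad is not None and not p.grad.is_sparse:
                    p.grad = p.grad.to_sparse()
            optimizer.step()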


    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        {_maximize_doc}
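
    Example (a minimal sketch; the embedding size and the input indices below are
    arbitrary placeholders)::

        >>> import torch
        >>> emb = torch.nn.Embedding(10, 3, sparse=True)
        >>> optimizer = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)
        >>> loss = emb(torch.tensor([1, 4, 4])).pow(2).sum()
        >>> loss.backward()   # ``emb.weight.grad`` arrives in a sparse layout
        >>> optimizer.step()
        >>> optimizer.zero_grad()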

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """