"""Implementation for the RAdam algorithm."""
from typing import cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc, _default_to_fused_or_foreach, _differentiable_doc,
    _disable_dynamo_if_unsupported, _foreach_doc,
    _get_capturable_supported_devices, _get_scalar_dtype, _get_value,
    _maximize_doc, _params_doc, _to_scalar, _use_grad_for_differentiable,
    _view_as_real, Optimizer, ParamsT,
)

__all__ = ["RAdam", "radam"]
eeef dededede	e dededef fddZ
 fddZdd ZedddZ  ZS )r   MbP?g?g+?:0yE>r   FN)foreachmaximize
capturabledifferentiableparamslrbetasepsweight_decaydecoupled_weight_decayr   r   r   r   c                   s   t |tr| dkrtdd|kstd| d|ks%td| d|d   kr1dk s;n td|d  d|d   krGdk sQn td	|d  d|ks\td
| t|||||||	||
d	}t || d S )Nr   zTensor lr must be 1-element        zInvalid learning rate: zInvalid epsilon value: r         ?z#Invalid beta parameter at index 0: z#Invalid beta parameter at index 1: zInvalid weight_decay value: )	r    r!   r"   r#   r   r   r   r$   r   )
isinstancer   numel
ValueErrordictsuper__init__)selfr   r    r!   r"   r#   r$   r   r   r   r   defaults	__class__ L/var/www/html/scripts/venv/lib/python3.10/site-packages/torch/optim/radam.pyr,       s0   zRAdam.__init__c                    s   t  | | jD ]Y}|dd  |dd |dd |dd |dd |d D ]4}| j|g }t|dkrat|d	 sat	|d	 }|d rWtj
|t |jd
ntj
|t d|d	< q-q	d S )Nr   r   Fr   r$   r   r   r   stepdtypedevicer5   )r+   __setstate__param_groups
setdefaultstategetlentorch	is_tensorfloattensorr   r6   )r-   r;   grouppp_statestep_valr/   r1   r2   r8   H   s(   

zRAdam.__setstate__c           
      C   s   d}|d D ]m}|j d urs|t|O }|| |j jr!td||j  | j| }	t|	dkr^|d r@tjdt	 |j
dntjdt	 d	|	d
< tj|tjd|	d< tj|tjd|	d< ||	d  ||	d  ||	d
  q|S )NFr   z'RAdam does not support sparse gradientsr   r   r1   r4   r%   r7   r3   )memory_formatexp_avg
exp_avg_sq)gradr>   
is_complexappend	is_sparseRuntimeErrorr;   r=   zerosr   r6   rA   
zeros_likepreserve_format)
r-   rB   params_with_gradgradsexp_avgsexp_avg_sqsstate_stepshas_complexrC   r;   r1   r1   r2   _init_group\   s2   




zRAdam._init_groupc                 C   s   |    d}|dur!t  | }W d   n1 sw   Y  | jD ]G}g }g }g }g }g }ttttf |d \}	}
| ||||||}t||||||	|
|d |d |d |d |d |d |d	 |d
 |d q$|S )zPerform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr!   r    r#   r"   r   r   r   r   r$   )beta1beta2r    r#   r"   r   r   r   r   r$   rV   )	 _cuda_graph_capture_health_checkr>   enable_gradr9   r   tupler@   rW   r   )r-   closurelossrB   rQ   rR   rS   rT   rU   rX   rY   rV   r1   r1   r2   r3      sF   

z
RAdam.step)r   r   r   r   FN)__name__
__module____qualname__r   r   r@   r   r\   boolr   r,   r8   rW   r   r3   __classcell__r1   r1   r/   r2   r      sH    	

	
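

# --- Editor's usage sketch (not part of the upstream module). The ``step`` method
# above accepts an optional closure; the hypothetical helper below, which nothing in
# this file calls, shows that calling pattern on a tiny least-squares problem.
def _closure_usage_example() -> None:
    param = torch.nn.Parameter(torch.zeros(3))
    optimizer = RAdam([param], lr=1e-3)

    def closure():
        optimizer.zero_grad()
        loss = (param - 1.0).pow(2).sum()
        loss.backward()
        return loss

    # step() re-evaluates the objective through the closure, then applies the update.
    optimizer.step(closure)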


RAdam.__doc__ = (
    r"""Implements RAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \beta_1, \beta_2
                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
                \lambda \text{ (weightdecay)}, \:\textit{maximize}                               \\
            &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay}         \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)},                                       \\
            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1                      \\[-1.ex]
            &\rule{110mm}{0.4pt}  \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{6mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{12mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{6mm} \theta_t \leftarrow \theta_{t-1}                                       \\
            &\hspace{6mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay}                       \\
            &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t}            \\
            &\hspace{12mm}\textbf{else}                                                          \\
            &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t}                               \\
            &\hspace{6mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{6mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{6mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big)                                    \\[0.1ex]
            &\hspace{6mm}\textbf{if} \: \rho_t > 5                                               \\
            &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon  } \\
            &\hspace{12mm} r_t \leftarrow
      \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t        \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.

    This implementation provides an option to use either the original weight_decay implementation as in Adam
    (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied
    to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False
    (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which
    corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information
    about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_.

    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_weight_decay (bool, optional): whether to decouple the weight
            decay as in AdamW to obtain RAdamW. If True, the algorithm does not
            accumulate weight decay in the momentum nor variance. (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _On the variance of the adaptive learning rate and beyond:
        https://arxiv.org/abs/1908.03265
    .. _author's implementation:
        https://github.com/LiyuanLucasLiu/RAdam
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """
)
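

# --- Editor's illustrative sketch (not part of the upstream module). The math block
# above only applies the rectified update once the approximated SMA length rho_t
# exceeds 5; the hypothetical helper below, which nothing in this file calls, shows
# how quickly rho_t grows for a given beta2 (with beta2 = 0.999 it crosses 5 after a
# handful of steps, so the earliest updates fall back to the unrectified branch).
def _rho_t_schedule_example(beta2: float = 0.999, num_steps: int = 8) -> list[float]:
    """Return rho_t for steps 1..num_steps (illustration only)."""
    rho_inf = 2 / (1 - beta2) - 1
    return [
        rho_inf - 2 * t * beta2**t / (1 - beta2**t) for t in range(1, num_steps + 1)
    ]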


def _single_tensor_radam(
    params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor], state_steps: list[Tensor], *,
    beta1: float, beta2: float, lr: float, weight_decay: float, eps: float,
    decoupled_weight_decay: bool, differentiable: bool, maximize: bool,
    capturable: bool, has_complex: bool,
):
    if not torch.jit.is_scripting():
        lr = _to_scalar(lr)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle the cudagraph checks.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), (
                "If capturable=True, params and state_steps must be on supported devices: "
                f"{capturable_supported_devices}."
            )

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        # update step
        step_t += 1
        step = step_t if capturable else _get_value(step_t)

        if weight_decay != 0:
            if decoupled_weight_decay:
                param.mul_(1 - lr * weight_decay)
            else:
                grad = grad.add(param, alpha=weight_decay)

        # Decay the first and second moment running average coefficients
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        bias_correction1 = 1 - beta1**step
        bias_correction2 = 1 - beta2**step

        # correcting bias for the first moment
        bias_corrected_exp_avg = exp_avg / bias_correction1

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # length of the approximated SMA at this step
        rho_t = rho_inf - 2 * step * (beta2**step) / bias_correction2

        def _compute_rect():
            return (
                (rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
            ) ** 0.5

        def _compute_adaptive_lr():
            exp_avg_sq_sqrt = exp_avg_sq.sqrt()
            if differentiable:
                exp_avg_sq_sqrt = exp_avg_sq_sqrt.add(eps)
            else:
                exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps)
            return (bias_correction2**0.5) / exp_avg_sq_sqrt

        # Compute the variance rectification term and update parameters accordingly
        if capturable:
            update = torch.where(rho_t > 5.0, _compute_adaptive_lr() * _compute_rect(), 1.0)
            param.add_(bias_corrected_exp_avg * lr * update, alpha=-1.0)
        else:
            if rho_t > 5.0:
                param.add_(
                    bias_corrected_exp_avg * lr * _compute_adaptive_lr() * _compute_rect(),
                    alpha=-1.0,
                )
            else:
                param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)
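

# --- Editor's illustrative sketch (not part of the upstream module). The multi-tensor
# path below relies on the horizontally fused ``torch._foreach_*`` primitives, which
# apply one operation across a whole list of tensors at once.  A minimal, hypothetical
# illustration of the lerp used for the first-moment update:
#
#     avgs = [torch.zeros(3), torch.zeros(5)]
#     grads = [torch.ones(3), torch.ones(5)]
#     torch._foreach_lerp_(avgs, grads, 0.1)  # avgs[i] <- avgs[i] + 0.1 * (grads[i] - avgs[i])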
   %         s8  t | dkrd S |rJ dtj s0|r0tddtfddt| |D s0J d dtt	| ||||g}|
 D ]\\}}}}}}ttt |}ttt |}ttt |}ttt |}ttt |}tj s|d jrtj|tjd	d
dd	d nt|d |rt|||| |rt|}dd  d |rt|}t| t|d t|}t|| t|d t|| t| t| |}n
fdd|D }|dkr|
rt|d|   n|rtj|||d ntj|||d}t||d   t| t|||d  ~|rt|d}t|d}t|| ~t| d d  t|} t||  ~ t| dd t||D }!~~dd |!D }"t|" t |}t| t|d t|"| t|" t|}t| t|d t| t| t||! ~!t| t|| ~n3fdd|D }!dd |!D }# fdd|D }fddt|#|D }"fddt||!|D }t|}$t|$|	 t|$| t|$ t|$|" t|||$ qBd S )Nr   z#_foreach ops don't support autogradF)supports_xlac                 3   s0    | ]\}}|j j|j jko|j j v V  qd S r_   )r6   r|   ).0rC   r3   )r   r1   r2   	<genexpr>  s    

z&_multi_tensor_radam.<locals>.<genexpr>re   rf   r&   cpu)r6   rg   r   rj   c                    s8   g | ]}d t |  t |  d t |    qS )rj   r   r   r   r3   )rY   rm   r1   r2   
<listcomp>  s    
z'_multi_tensor_radam.<locals>.<listcomp>rk   c                 S   s"   g | ]\}}t |d k|dqS )rv   r%   r>   r   )r   nrn   r1   r1   r2   r     s    c                 S   s   g | ]}t |d kddqS )r   r%   r&   r   r   rectr1   r1   r2   r     s    c                    sD   g | ]}|d kr|d |d     d  d  |  d ndqS )   rk   rj   rl   r   r1   )r   rn   )rm   r1   r2   r     s    
c                 S   s   g | ]
}|d kr
d ndqS )r   r&   r1   r   r1   r1   r2   r     s    c                    s   g | ]
}d  t |  qS )r   r   r   )rX   r1   r2   r     s    c                    s    g | ]\}} | | d  qS )r1   )r   r   bc)r    r1   r2   r     s    c                    s6   g | ]\}}}d  t |  d | |  d qS )r   rl   r   r   )r   r3   r   r   )rY   r    r1   r2   r      s    ")r=   r>   rz   r{   r   allzipr   r   "_group_tensors_by_device_and_dtypevaluesr   listr   is_cpu_foreach_add_rA   r   _foreach_neg_foreach_pow_foreach_neg__foreach_mul__foreach_div__foreach_add_foreach_lerp__foreach_addcmul__foreach_sub_foreach_mul_foreach_sqrt__foreach_sqrt_foreach_reciprocal_)%r   rR   rS   rT   rU   rX   rY   r    r#   r"   r$   r   r   r   rV   grouped_tensorsgrouped_params_grouped_grads_grouped_exp_avgs_grouped_exp_avg_sqs_grouped_state_steps__grouped_paramsgrouped_gradsgrouped_exp_avgsgrouped_exp_avg_sqsgrouped_state_stepsr   rt   
rho_t_listnumsub2denomr   unrect_step_sizeunrectifiedbufferr1   )rX   rY   r   r    rm   r2   _multi_tensor_radamh  s   

	




	














@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_radam)
def radam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    # keyword-only args with defaults are not supported by torchscript-compiled
    # functions, so these stay positional-or-keyword for the compiled functional API
    decoupled_weight_decay: bool = False,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    maximize: bool = False,
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
):
    r"""Functional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_radam
    else:
        func = _single_tensor_radam

    func(
        params, grads, exp_avgs, exp_avg_sqs, state_steps,
        beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps,
        maximize=maximize, decoupled_weight_decay=decoupled_weight_decay,
        differentiable=differentiable, capturable=capturable, has_complex=has_complex,
    )
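

if __name__ == "__main__":
    # Editor's smoke-test sketch (not part of the upstream module): a tiny quadratic
    # problem optimized with RAdam.  Kept under a __main__ guard so importing the file
    # is unaffected; run it as ``python -m torch.optim.radam`` so the relative imports
    # above resolve.
    torch.manual_seed(0)
    w = torch.nn.Parameter(torch.randn(4))
    opt = RAdam([w], lr=1e-2, weight_decay=1e-2, decoupled_weight_decay=True)
    for _ in range(20):
        opt.zero_grad()
        loss = (w**2).sum()
        loss.backward()
        opt.step()
    print(f"loss after 20 RAdam steps: {(w**2).sum().item():.4f}")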
   r   r   r   r   r   r   r   r   r   r   r   __all__r   r   r@   rc   r   r   r   r1   r1   r1   r2   <module>   s   D 3P	

h	

 H		
