import math
from typing import List, Optional

import torch
from torch import Tensor

from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value,
                        _dispatch_sqrt, _stack_if_compiling, _default_to_fused_or_foreach,
                        _differentiable_doc, _foreach_doc)

__all__ = ["RAdam", "radam"]


class RAdam(Optimizer):
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
                 *, foreach: Optional[bool] = None, differentiable: bool = False):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        foreach=foreach, differentiable=differentiable)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("differentiable", False)
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]["step"])
        if not step_is_tensor:
            for s in state_values:
                s["step"] = torch.tensor(float(s["step"]))

    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps):
        for p in group["params"]:
            if p.grad is not None:
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    state["step"] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                state_steps.append(state["step"])

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group["betas"]

            self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps)

            radam(params_with_grad,
                  grads,
                  exp_avgs,
                  exp_avg_sqs,
                  state_steps,
                  beta1=beta1,
                  beta2=beta2,
                  lr=group["lr"],
                  weight_decay=group["weight_decay"],
                  eps=group["eps"],
                  foreach=group["foreach"],
                  differentiable=group["differentiable"])

        return loss
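
# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how ``step(closure)`` is
# typically driven when the loss needs to be re-evaluated, as described in the ``step``
# docstring above.  The model, data, and loss below are hypothetical placeholders.
# --------------------------------------------------------------------------------------
def _example_step_with_closure():  # hypothetical helper, never called by this module
    model = torch.nn.Linear(4, 1)
    optimizer = RAdam(model.parameters(), lr=1e-3)
    inputs, targets = torch.randn(8, 4), torch.randn(8, 1)

    def closure():
        # The closure clears stale gradients, re-evaluates the model, and returns the loss.
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        return loss

    return optimizer.step(closure)
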
RAdam.__doc__ = r"""Implements RAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \beta_1, \beta_2
                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
                \lambda \text{ (weight decay)},                                                  \\
            &\hspace{13mm} \epsilon \text{ (epsilon)}                                            \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ (first moment)},
                v_0 \leftarrow 0 \text{ (second moment)},                                        \\
            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1                       \\[-1ex]
            &\rule{110mm}{0.4pt}  \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{6mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{6mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{6mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{6mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big)                                     \\[0.1ex]
            &\hspace{6mm}\textbf{if} \: \rho_t > 5                                               \\
            &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon  } \\
            &\hspace{12mm} r_t \leftarrow
      \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} r_t l_t        \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}                \\
            &\rule{110mm}{0.4pt}                                                           \\[-1ex]
            &\bf{return} \:  \theta_t                                                      \\[-1ex]
            &\rule{110mm}{0.4pt}                                                           \\[-1ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.

    This implementation uses the same weight_decay behaviour as Adam (where the weight_decay is applied
    to the gradient) and not the one from AdamW (where the weight_decay is applied to the update). This
    is different from the `author's implementation`_.
    """ + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_differentiable_doc}

    .. _On the variance of the adaptive learning rate and beyond:
        https://arxiv.org/abs/1908.03265
    .. _author's implementation:
        https://github.com/LiyuanLucasLiu/RAdam

    """
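
# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal training loop using
# the constructor arguments documented above.  The model, data, and hyperparameter
# values are arbitrary placeholders chosen for the example.
# --------------------------------------------------------------------------------------
def _example_basic_usage():  # hypothetical helper, never called by this module
    model = torch.nn.Linear(10, 2)
    optimizer = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                      eps=1e-8, weight_decay=0.0)
    for _ in range(3):
        optimizer.zero_grad()
        loss = model(torch.randn(16, 10)).pow(2).mean()
        loss.backward()
        optimizer.step()
    return loss
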
    F)r    r9   r:   r;   r<   r   r   r?   r@   r   r   r   c                C   s   t dd |D std|dkr4t| |dd\}}|rJtj rJtd|r^tj s^t}nt}|| |||||||	|
||d dS )	zpFunctional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    c                 s   s   | ]}t |tjV  qd S )N)
isinstancer.   r   ).0tr$   r$   r%   	<genexpr>   s     zradam.<locals>.<genexpr>zPAPI has changed, `state_steps` argument must contain a list of singleton tensorsNF)Z	use_fusedz6torch.jit.script not supported with foreach optimizers)r?   r@   r   r   r   r   )allr7   r	   r.   ZjitZis_scripting_multi_tensor_radam_single_tensor_radam)r    r9   r:   r;   r<   r   r   r?   r@   r   r   r   _funcr$   r$   r%   r      s0    )r    r9   r:   r;   r<   r?   r@   r   r   r   r   c                C   sh  t | D ]X\}}|| }|| }|| }|| }|d7 }t|}d||  }d||  }|dkrp|j||d}||d|  ||j||d| d || }dd|  d }|d| ||  |  }|dkrPt|d |d  | |d |d  |  }| }|
r||	}n
||	}t|| }|j|| | | dd q|j|| dd qd S )	Nr   r   alpha)value   g      @   g      )		enumerater   addZlerp_Zmul_Zaddcmul_mathsqrtZadd_)r    r9   r:   r;   r<   r?   r@   r   r   r   r   iparamr5   r3   r4   Zstep_tr&   bias_correction1Zbias_correction2Zbias_corrected_exp_avgrho_infrho_trectZexp_avg_sq_sqrtZadaptive_lrr$   r$   r%   rM      sB    

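
# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): with the default beta2 = 0.999,
# rho_inf = 2 / (1 - 0.999) - 1 = 1999 and rho_t grows by roughly one per step, so the
# first few steps fall below the rho_t > 5 threshold and take the unrectified branch
# above.  The helper below just tabulates which branch each early step would take.
# --------------------------------------------------------------------------------------
def _example_rectification_threshold(beta2=0.999, num_steps=10):  # hypothetical helper
    rho_inf = 2 / (1 - beta2) - 1
    branches = []
    for step in range(1, num_steps + 1):
        rho_t = rho_inf - 2 * step * (beta2 ** step) / (1 - beta2 ** step)
        branches.append("rectified" if rho_t > 5.0 else "unrectified")
    # For beta2 = 0.999 the first 5 steps come out "unrectified".
    return rho_inf, branches
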
def _multi_tensor_radam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor],
                        *,
                        beta1: float,
                        beta2: float,
                        lr: float,
                        weight_decay: float,
                        eps: float,
                        differentiable: bool):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, state_steps])
    for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs,
          grouped_state_steps), _) in grouped_tensors.values():
        # Update steps
        torch._foreach_add_(grouped_state_steps, 1)

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # compute the length of the approximated SMA
        rho_t_list = [rho_inf - 2 * _get_value(step) * (beta2 ** _get_value(step)) /
                      (1 - beta2 ** _get_value(step)) for step in grouped_state_steps]

        if weight_decay != 0:
            grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)

        rect = [_dispatch_sqrt((rho_t - 4) * (rho_t - 2) * rho_inf /
                               ((rho_inf - 4) * (rho_inf - 2) * rho_t))
                if rho_t > 5 else 0 for rho_t in rho_t_list]
        unrectified = [0 if rect > 0 else 1.0 for rect in rect]

        bias_correction1 = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
        unrect_step_size = _stack_if_compiling(
            [(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)])
        bias_correction2_sqrt_times_rect_step_size = [
            _dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1
            for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1)]

        buffer = torch._foreach_sqrt(grouped_exp_avg_sqs)
        torch._foreach_add_(buffer, eps)
        torch._foreach_div_(buffer, bias_correction2_sqrt_times_rect_step_size)
        torch._foreach_reciprocal_(buffer)
        torch._foreach_add_(buffer, unrect_step_size)

        # Here, buffer = sqrt(1 - beta2^t) * rect_step_size / (sqrt(v) + eps) + unrect_step_size
        torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, buffer)
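
# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick consistency check that
# the per-tensor loop path and the foreach path above produce the same update for one
# step.  Shapes and hyperparameter values are arbitrary placeholders.
# --------------------------------------------------------------------------------------
def _example_single_vs_multi_tensor():  # hypothetical helper, never called by this module
    kwargs = dict(beta1=0.9, beta2=0.999, lr=1e-3, weight_decay=0.0, eps=1e-8,
                  differentiable=False)
    param = torch.randn(3, 3)
    grad = torch.randn(3, 3)

    def one_step(impl):
        # Each implementation gets its own copies of the parameter and optimizer state.
        params = [param.clone()]
        grads = [grad.clone()]
        exp_avgs = [torch.zeros_like(param)]
        exp_avg_sqs = [torch.zeros_like(param)]
        state_steps = [torch.tensor(0.0)]
        impl(params, grads, exp_avgs, exp_avg_sqs, state_steps, **kwargs)
        return params[0]

    # Expected to be True (up to floating-point tolerance).
    return torch.allclose(one_step(_single_tensor_radam), one_step(_multi_tensor_radam))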