import torch
from torch import Tensor
from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value,
                        _default_to_fused_or_foreach, _differentiable_doc,
                        _foreach_doc, _maximize_doc)
from typing import List, Optional

__all__ = ["Adagrad", "adagrad"]


class Adagrad(Optimizer):
    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0,
                 initial_accumulator_value=0, eps=1e-10,
                 foreach: Optional[bool] = None, *, maximize: bool = False,
                 differentiable: bool = False):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= lr_decay:
            raise ValueError(f"Invalid lr_decay value: {lr_decay}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= initial_accumulator_value:
            raise ValueError(f"Invalid initial_accumulator_value value: {initial_accumulator_value}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")

        defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
                        initial_accumulator_value=initial_accumulator_value,
                        foreach=foreach, maximize=maximize, differentiable=differentiable)
        super().__init__(params, defaults)

        # Eagerly initialize per-parameter state: a scalar step counter and the running
        # sum of squared gradients seeded with initial_accumulator_value.
        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["step"] = torch.tensor(0.0)
                init_value = (complex(initial_accumulator_value, initial_accumulator_value)
                              if torch.is_complex(p) else initial_accumulator_value)
                state["sum"] = torch.full_like(p, init_value, memory_format=torch.preserve_format)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
        # Older checkpoints stored `step` as a Python number; convert it to a tensor.
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]["step"])
        if not step_is_tensor:
            for s in state_values:
                s["step"] = torch.tensor(float(s["step"]))

    def share_memory(self):
        for group in self.param_groups:
            for p in group["params"]:
                self.state[p]["sum"].share_memory_()

    def _init_group(self, group, params_with_grad, grads, state_sums, state_steps):
        has_sparse_grad = False
        for p in group["params"]:
            if p.grad is not None:
                if p.grad.is_sparse:
                    has_sparse_grad = True
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                state_sums.append(state["sum"])
                state_steps.append(state["step"])
        return has_sparse_grad

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad, grads, state_sums, state_steps = [], [], [], []
            has_sparse_grad = self._init_group(group, params_with_grad, grads,
                                               state_sums, state_steps)
            adagrad(params_with_grad, grads, state_sums, state_steps,
                    lr=group["lr"], weight_decay=group["weight_decay"],
                    lr_decay=group["lr_decay"], eps=group["eps"],
                    has_sparse_grad=has_sparse_grad, foreach=group["foreach"],
                    maximize=group["maximize"], differentiable=group["differentiable"])

        return loss


Adagrad.__doc__ = r"""Implements Adagrad algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                          \\
            &\hspace{12mm}    \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\
            &\textbf{initialize} :  state\_sum_0 \leftarrow 0                             \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \tilde{\gamma}    \leftarrow \gamma / (1 +(t-1) \eta)                  \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm}state\_sum_t  \leftarrow  state\_sum_{t-1} + g^2_t                      \\
            &\hspace{5mm}\theta_t \leftarrow
                \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon}            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}

    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html

    """
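
# A quick numeric reading of the update rule above (illustrative only, not part of
# the library source): with lr=0.1, lr_decay=0, weight_decay=0, eps=1e-10 and a
# parameter that keeps receiving the gradient g = 2.0,
#
#   step 1: state_sum = 0 + 2.0**2 = 4.0,  update = -0.1 * 2.0 / (sqrt(4.0) + 1e-10) ≈ -0.1
#   step 2: state_sum = 4.0 + 4.0  = 8.0,  update = -0.1 * 2.0 / (sqrt(8.0) + 1e-10) ≈ -0.0707
#
# i.e. the accumulated squared gradients shrink the effective step size over time,
# which is the defining behaviour of Adagrad.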
    F)r   r:   r;   r<   r=   r   r   r   r   r   r   r   c                C   s   t dd |D std|dkr4t| |dd\}}|rJtj rJtd|r^tj s^t}nt}|| ||||||	|
|||d dS )	ztFunctional API that performs Adagrad algorithm computation.

    See :class:`~torch.optim.Adagrad` for details.
    c                 s   s   | ]}t |tjV  qd S N)
isinstancer"   r   ).0tr,   r,   r-   	<genexpr>   s     zadagrad.<locals>.<genexpr>zPAPI has changed, `state_steps` argument must contain a list of singleton tensorsNF)Z	use_fusedz6torch.jit.script not supported with foreach optimizersr   r   r   r   r=   r   r   )allRuntimeErrorr   r"   ZjitZis_scripting_multi_tensor_adagrad_single_tensor_adagrad)r   r:   r;   r<   r=   r   r   r   r   r   r   r   _funcr,   r,   r-   r      s0    c                 C   s8   |   }| dks | dkr*t| S t|||S )Nr   )sizeZnumelr"   Z
empty_likeZsparse_coo_tensor)r6   grad_indicesr1   rQ   r,   r,   r-   _make_sparse   s    
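
# Illustrative sketch (not from the original file): the functional entry point can be
# driven directly with explicit tensor lists, which is what Adagrad.step() does after
# collecting them via _init_group(). All tensors below are made-up example values.
#
#   param = torch.zeros(3)
#   grad = torch.ones(3)
#   state_sum = torch.zeros(3)
#   step = torch.tensor(0.0)
#   adagrad([param], [grad], [state_sum], [step], has_sparse_grad=False, foreach=False,
#           lr=0.1, weight_decay=0.0, lr_decay=0.0, eps=1e-10, maximize=False)
#   # param has been updated in place (each entry moves by about -0.1),
#   # state_sum now holds grad**2, and step is 1.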


def _single_tensor_adagrad(params: List[Tensor], grads: List[Tensor],
                           state_sums: List[Tensor], state_steps: List[Tensor], *,
                           lr: float, weight_decay: float, lr_decay: float, eps: float,
                           has_sparse_grad: bool, maximize: bool, differentiable: bool):
    for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps):
        # update step
        step_t += 1
        step = _get_value(step_t)
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            if grad.is_sparse:
                raise RuntimeError("weight_decay option is not compatible with sparse gradients")
            grad = grad.add(param, alpha=weight_decay)

        clr = lr / (1 + (step - 1) * lr_decay)

        if grad.is_sparse:
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()

            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)
            std_values = std._values().sqrt_().add_(eps)
            param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)
        else:
            is_complex = torch.is_complex(param)
            if is_complex:
                grad = torch.view_as_real(grad)
                state_sum = torch.view_as_real(state_sum)
                param = torch.view_as_real(param)
            state_sum.addcmul_(grad, grad, value=1)
            if differentiable:
                std = state_sum.sqrt() + eps
            else:
                std = state_sum.sqrt().add_(eps)
            param.addcdiv_(grad, std, value=-clr)
            if is_complex:
                param = torch.view_as_complex(param)
                state_sum = torch.view_as_complex(state_sum)


def _multi_tensor_adagrad(params: List[Tensor], grads: List[Tensor],
                          state_sums: List[Tensor], state_steps: List[Tensor], *,
                          lr: float, weight_decay: float, lr_decay: float, eps: float,
                          has_sparse_grad: bool, maximize: bool, differentiable: bool):
    assert not differentiable, "_foreach ops don't support autograd"

    # Foreach functions will throw errors if given empty lists
    if len(params) == 0:
        return

    grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, state_sums, state_steps])
    for (device_params, device_grads, device_state_sums, device_state_steps), _ in grouped_tensorlists.values():
        device_has_sparse_grad = any(grad.is_sparse for grad in device_grads)

        if device_has_sparse_grad:
            # Sparse gradients fall back to the single-tensor implementation.
            return _single_tensor_adagrad(device_params, device_grads, device_state_sums,
                                          device_state_steps, lr=lr, weight_decay=weight_decay,
                                          lr_decay=lr_decay, eps=eps, has_sparse_grad=True,
                                          maximize=False, differentiable=differentiable)

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Handle complex parameters as real views.
        device_grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_grads]
        device_state_sums = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_state_sums]
        device_params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_params]

        # Update steps.
        torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize.
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        minus_clr = [-lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps]

        torch._foreach_addcmul_(device_state_sums, device_grads, device_grads, value=1)

        std = torch._foreach_sqrt(device_state_sums)
        torch._foreach_add_(std, eps)

        if weight_decay != 0 or maximize:
            # Again, re-use the intermediate memory (device_grads) already allocated.
            torch._foreach_mul_(device_grads, minus_clr)
            numerator = device_grads
        else:
            numerator = torch._foreach_mul(device_grads, minus_clr)

        torch._foreach_addcdiv_(device_params, numerator, std)