import torch
from torch import Tensor
from .optimizer import (Optimizer, required, _use_grad_for_differentiable,
                        _default_to_fused_or_foreach, _differentiable_doc,
                        _foreach_doc, _maximize_doc)
from typing import List, Optional

__all__ = ['SGD', 'sgd']


class SGD(Optimizer):
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, *, maximize: bool = False,
                 foreach: Optional[bool] = None, differentiable: bool = False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov, maximize=maximize,
                        foreach=foreach, differentiable=differentiable)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
            group.setdefault('maximize', False)
            group.setdefault('foreach', None)
            group.setdefault('differentiable', False)

    def _init_group(self, group, params_with_grad, d_p_list, momentum_buffer_list):
        # Gather this group's parameters that have gradients, their gradients,
        # and any existing momentum buffers (None when not yet initialized).
        has_sparse_grad = False
        for p in group['params']:
            if p.grad is not None:
                params_with_grad.append(p)
                d_p_list.append(p.grad)
                if p.grad.is_sparse:
                    has_sparse_grad = True
                state = self.state[p]
                if 'momentum_buffer' not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state['momentum_buffer'])
        return has_sparse_grad

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            d_p_list = []
            momentum_buffer_list = []

            has_sparse_grad = self._init_group(
                group, params_with_grad, d_p_list, momentum_buffer_list)

            sgd(params_with_grad,
                d_p_list,
                momentum_buffer_list,
                weight_decay=group['weight_decay'],
                momentum=group['momentum'],
                lr=group['lr'],
                dampening=group['dampening'],
                nesterov=group['nesterov'],
                maximize=group['maximize'],
                has_sparse_grad=has_sparse_grad,
                foreach=group['foreach'])

            # Write the (possibly freshly created) momentum buffers back into state.
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                self.state[p]['momentum_buffer'] = momentum_buffer

        return loss


SGD.__doc__ = r"""Implements stochastic gradient descent (optionally with momentum).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                          \\
            &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)},
            \:\textit{ nesterov,}\:\textit{ maximize}                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}\textbf{if} \: \mu \neq 0                                               \\
            &\hspace{10mm}\textbf{if} \: t > 1                                                   \\
            &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t           \\
            &\hspace{10mm}\textbf{else}                                                          \\
            &\hspace{15mm} \textbf{b}_t \leftarrow g_t                                           \\
            &\hspace{10mm}\textbf{if} \: \textit{nesterov}                                       \\
            &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t                             \\
            &\hspace{10mm}\textbf{else}                                                   \\[-1.ex]
            &\hspace{15mm} g_t  \leftarrow  \textbf{b}_t                                         \\
            &\hspace{5mm}\textbf{if} \: \textit{maximize}                                          \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t                   \\[-1.ex]
            &\hspace{5mm}\textbf{else}                                                    \\[-1.ex]
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t                   \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        {_maximize_doc}
        {_foreach_doc}
        {_differentiable_doc}
    """ + r"""

    Example:
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.

        Considering the specific case of Momentum, the update can be written as

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
        parameters, gradient, velocity, and momentum respectively.

        This is in contrast to Sutskever et al. and
        other frameworks which employ an update of the form

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
                p_{t+1} & = p_{t} - v_{t+1}.
            \end{aligned}

        The Nesterov version is analogously modified.

        Moreover, the initial value of the momentum buffer is set to the
        gradient value at the first step. This is in contrast to some other
        frameworks that initialize it to all zeros.

    """
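
# The ``.. note::`` block in the docstring above contrasts two ways of folding the
# learning rate into the momentum update. The helper below is an illustrative sketch
# only (it is not part of torch's API and the name is hypothetical): it performs one
# step of each variant on plain Python floats so the difference in where ``lr`` enters
# is easy to inspect.
def _momentum_variants_one_step(p, g, v, lr, mu):
    # PyTorch-style: velocity accumulates raw gradients; lr scales the parameter update.
    #   v_{t+1} = mu * v_t + g_{t+1};   p_{t+1} = p_t - lr * v_{t+1}
    v_torch = mu * v + g
    p_torch = p - lr * v_torch
    # Sutskever et al. style: lr scales the gradient before it enters the velocity.
    #   v_{t+1} = mu * v_t + lr * g_{t+1};   p_{t+1} = p_t - v_{t+1}
    v_other = mu * v + lr * g
    p_other = p - v_other
    return (p_torch, v_torch), (p_other, v_other)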

def sgd(params: List[Tensor],
        d_p_list: List[Tensor],
        momentum_buffer_list: List[Optional[Tensor]],
        has_sparse_grad: bool = None,
        foreach: Optional[bool] = None,
        *,
        weight_decay: float,
        momentum: float,
        lr: float,
        dampening: float,
        nesterov: bool,
        maximize: bool):
    r"""Functional API that performs SGD algorithm computation.

    See :class:`~torch.optim.SGD` for details.
    """
    if foreach is None:
        # Pick an implementation heuristically when the caller did not choose one,
        # but never from inside a scripted function.
        if not torch.jit.is_scripting():
            _, foreach = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False)
        else:
            foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_sgd
    else:
        func = _single_tensor_sgd

    func(params,
         d_p_list,
         momentum_buffer_list,
         weight_decay=weight_decay,
         momentum=momentum,
         lr=lr,
         dampening=dampening,
         nesterov=nesterov,
         has_sparse_grad=has_sparse_grad,
         maximize=maximize)
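
# Illustrative usage sketch for the functional entry point above (again not part of the
# public API; the function name is hypothetical). The caller owns the momentum buffer
# list and passes it back on every call, which is exactly what SGD.step() does internally.
def _functional_sgd_example():
    params = [torch.zeros(3)]
    grads = [torch.ones(3)]
    momentum_buffers: List[Optional[Tensor]] = [None]
    sgd(params, grads, momentum_buffers,
        weight_decay=0.0, momentum=0.9, lr=0.1, dampening=0.0,
        nesterov=False, maximize=False, has_sparse_grad=False, foreach=False)
    # With a fresh buffer, one step leaves buffer == grad and param == -lr * grad.
    return params[0], momentum_buffers[0]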
)
r   r,   r-   r   r   r   r   r   r   r.   c                C   s   t | D ]\}
}|s||
 n||
  }|dkr<|j||d}|dkr||
 }|d krlt| }|||
< n||j|d| d |r|j||d}n|}|j|| d qd S )Nr   alphar   )	enumerateaddr1   clonedetachmul_add_)r   r,   r-   r   r   r   r   r   r   r.   iparamZd_pbufr    r    r!   r<      s    

def _multi_tensor_sgd(params: List[Tensor],
                      grads: List[Tensor],
                      momentum_buffer_list: List[Optional[Tensor]],
                      *,
                      weight_decay: float,
                      momentum: float,
                      lr: float,
                      dampening: float,
                      nesterov: bool,
                      maximize: bool,
                      has_sparse_grad: bool):

    if len(params) == 0:
        return

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, momentum_buffer_list], with_indices=True)

    for (device_params, device_grads, device_momentum_buffer_list), indices in grouped_tensors.values():
        device_has_sparse_grad = any(grad.is_sparse for grad in device_grads)

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        if weight_decay != 0:
            # Re-use the intermediate already allocated for maximize; otherwise allocate here.
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        if momentum != 0:
            bufs = []

            all_states_with_momentum_buffer = True
            for i in range(len(device_momentum_buffer_list)):
                if device_momentum_buffer_list[i] is None:
                    all_states_with_momentum_buffer = False
                    break
                else:
                    bufs.append(device_momentum_buffer_list[i])

            if all_states_with_momentum_buffer:
                torch._foreach_mul_(bufs, momentum)
                torch._foreach_add_(bufs, device_grads, alpha=1 - dampening)
            else:
                bufs = []
                for i in range(len(device_momentum_buffer_list)):
                    if device_momentum_buffer_list[i] is None:
                        # Initialize the buffer to the gradient and propagate it back to
                        # the caller's flat momentum_buffer_list via the saved indices.
                        buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = \
                            torch.clone(device_grads[i]).detach()
                    else:
                        buf = device_momentum_buffer_list[i]
                        buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening)

                    bufs.append(buf)

            if nesterov:
                torch._foreach_add_(device_grads, bufs, alpha=momentum)
            else:
                device_grads = bufs

        if not device_has_sparse_grad:
            torch._foreach_add_(device_params, device_grads, alpha=-lr)
        else:
            # foreach APIs don't support sparse gradients; fall back to a per-tensor loop.
            for i in range(len(device_params)):
                device_params[i].add_(device_grads[i], alpha=-lr)
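
# Minimal end-to-end sketch mirroring the Example in the class docstring. It is an
# illustration added for readability, not part of the upstream module; because of the
# relative import at the top it only runs with package context, e.g. ``python -m torch.optim.sgd``.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9)
    inp, target = torch.randn(8, 4), torch.randn(8, 1)
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inp), target)
    loss.backward()
    optimizer.step()
    print("momentum buffers initialized:",
          all('momentum_buffer' in optimizer.state[p] for p in model.parameters()))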