"""PyTorch optimization for BERT model."""

import math
import warnings
from functools import partial
from typing import Callable, Iterable, Optional, Tuple, Union

import torch
from torch import nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau

from .trainer_utils import SchedulerType
from .utils import logging
from .utils.versions import require_version


logger = logging.get_logger(__name__)


def _get_constant_lambda(_=None):
    return 1


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch)


def get_reduce_on_plateau_schedule(optimizer: Optimizer):
    """
    Create a schedule with a constant learning rate that decreases when a metric has stopped improving.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.

    Return:
        `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule.
    """
    return ReduceLROnPlateau(optimizer)


def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1.0, num_warmup_steps))
    return 1.0


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
    increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
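
    Example (an illustrative sketch; the tiny model and step counts below are placeholder values):

    ```python
    import torch

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)
    scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=10)
    ```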
    """
    lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
    a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
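
    Example (an illustrative sketch; the model, learning rate, and step counts are placeholder values):

    ```python
    import torch

    model = torch.nn.Linear(8, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

    for _ in range(100):
        optimizer.step()  # in a real loop this follows loss.backward()
        scheduler.step()  # advance the schedule once per training step
    ```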
    """
    lr_lambda = partial(
        _get_linear_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_cosine_schedule_with_warmup_lr_lambda(
    current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
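
    Example (an illustrative sketch; the default `num_cycles=0.5` gives a single half-cosine decay, and the values
    below are placeholders):

    ```python
    import torch

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)
    scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
    lrs = []
    for _ in range(100):
        optimizer.step()
        lrs.append(scheduler.get_last_lr()[0])
        scheduler.step()
    ```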
    """
    lr_lambda = partial(
        _get_cosine_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda(
    current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
    if progress >= 1.0:
        return 0.0
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
    linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`int`, *optional*, defaults to 1):
            The number of hard restarts to use.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
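
    Example (an illustrative sketch; `num_cycles=2` restarts the cosine decay twice over 100 placeholder steps):

    ```python
    import torch

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)
    scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100, num_cycles=2
    )
    ```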
    """
    lr_lambda = partial(
        _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_polynomial_decay_schedule_with_warmup_lr_lambda(
    current_step: int,
    *,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float,
    power: float,
    lr_init: float,
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    elif current_step > num_training_steps:
        return lr_end / lr_init  # as LambdaLR multiplies by lr_init
    else:
        lr_range = lr_init - lr_end
        decay_steps = num_training_steps - num_warmup_steps
        pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
        decay = lr_range * pct_remaining**power + lr_end
        return decay / lr_init  # as LambdaLR multiplies by lr_init


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """
    Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
    optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        lr_end (`float`, *optional*, defaults to 1e-7):
            The end LR.
        power (`float`, *optional*, defaults to 1.0):
            Power factor.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
    implementation at
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
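
    Example (an illustrative sketch; `lr_end` and `power` below are placeholder choices):

    ```python
    import torch

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)
    scheduler = get_polynomial_decay_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100, lr_end=1e-6, power=2.0
    )
    ```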

    lrzlr_end (z&) must be be smaller than initial lr ())r    r*   r=   r>   r?   )defaults
ValueErrorr   rA   r
   )r   r    r*   r=   r>   r   r?   r'   r   r   r   )get_polynomial_decay_schedule_with_warmup   s    
rG   )	timescale)r   r    rH   c                C   s@   | |k rt | t td| S || }dt| | |  }|S )Nr   r!   )r#   r$   r2   sqrt)r   r    rH   shiftr@   r   r   r   $_get_inverse_sqrt_schedule_lr_lambda  s
    rK   )r   r    rH   r   c                 C   s(   |dkr|}t t||d}t| ||dS )a  
    Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a
    warmup period which increases lr linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        timescale (`int`, *optional*, defaults to `num_warmup_steps`):
            Time scale.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
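
    Example (an illustrative sketch; `timescale` is left at its default, i.e. `num_warmup_steps`):

    ```python
    import torch

    optimizer = torch.optim.SGD(torch.nn.Linear(8, 2).parameters(), lr=0.1)
    scheduler = get_inverse_sqrt_schedule(optimizer, num_warmup_steps=10)
    ```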
    N)r    rH   r   )r   rK   r
   )r   r    rH   r   r'   r   r   r   get_inverse_sqrt_schedule%  s    rL   )namer   r    r*   c                 C   s   t | } t|  }| t jks$| t jkr,||S |dkrBt|  d| t jkrX|||dS | t jkrn|||dS |dkrt|  d||||dS )a  
    Unified API to get any scheduler from its name.

    Args:
        name (`str` or `SchedulerType`):
            The name of the scheduler to use.
        optimizer (`torch.optim.Optimizer`):
            The optimizer that will be used during training.
        num_warmup_steps (`int`, *optional*):
            The number of warmup steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        num_training_steps (`int`, *optional*):
            The number of training steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
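
    Example (an illustrative sketch; the scheduler name and step counts are placeholder values):

    ```python
    import torch

    optimizer = torch.optim.AdamW(torch.nn.Linear(8, 2).parameters(), lr=1e-3)
    scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
    ```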
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT or name == SchedulerType.REDUCE_ON_PLATEAU:
        return schedule_func(optimizer)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)

    if name == SchedulerType.INVERSE_SQRT:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)


class AdamW(Optimizer):
    """
    Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay
    Regularization](https://arxiv.org/abs/1711.05101).

    Parameters:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*, defaults to 1e-3):
            The learning rate to use.
        betas (`Tuple[float,float]`, *optional*, defaults to (0.9, 0.999)):
            Adam's betas parameters (b1, b2).
        eps (`float`, *optional*, defaults to 1e-6):
            Adam's epsilon for numerical stability.
        weight_decay (`float`, *optional*, defaults to 0):
            Decoupled weight decay to apply.
        correct_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`).
        no_deprecation_warning (`bool`, *optional*, defaults to `False`):
            A flag used to disable the deprecation warning (set to `True` to disable the warning).
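
    Example (an illustrative sketch; the two-group weight-decay split and the hyperparameters are placeholder
    choices, not prescriptions):

    ```python
    import torch

    model = torch.nn.Linear(8, 2)
    no_decay = ["bias", "LayerNorm.weight"]
    grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(grouped_parameters, lr=2e-5, no_deprecation_warning=True)
    ```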
    """

    def __init__(
        self,
        params: Iterable[nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
        no_deprecation_warning: bool = False,
    ):
        if not no_deprecation_warning:
            warnings.warn(
                "This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch"
                " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this"
                " warning",
                FutureWarning,
            )
        require_version("torch>=1.5.0")  # add_ with alpha
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias}
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Arguments:
            closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])

                step_size = group["lr"]
                if group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.addcdiv_(exp_avg, denom, value=-step_size)

                # Just adding the square of the weights to the loss function is *not* the correct way of using
                # L2 regularization/weight decay with Adam, since that will interact with the m and v parameters
                # in strange ways. Instead we want to decay the weights in a manner that doesn't interact with
                # the m/v parameters. This is equivalent to adding the square of the weights to the loss with
                # plain (non-momentum) SGD. Add weight decay at the end (fixed version).
                if group["weight_decay"] > 0.0:
                    p.add_(p, alpha=(-group["lr"] * group["weight_decay"]))

        return loss


class Adafactor(Optimizer):
    """
    AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code:
    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py

    Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* (https://arxiv.org/abs/1804.04235). Note that
    this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and
    `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
    `relative_step=False`.

    Arguments:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*):
            The external learning rate.
        eps (`Tuple[float, float]`, *optional*, defaults to (1e-30, 1e-3)):
            Regularization constants for square gradient and parameter scale respectively
        clip_threshold (`float`, *optional*, defaults to 1.0):
            Threshold of root mean square of final gradient update
        decay_rate (`float`, *optional*, defaults to -0.8):
            Coefficient used to compute running averages of square
        beta1 (`float`, *optional*):
            Coefficient used for computing running averages of gradient
        weight_decay (`float`, *optional*, defaults to 0):
            Weight decay (L2 penalty)
        scale_parameter (`bool`, *optional*, defaults to `True`):
            If True, learning rate is scaled by root mean square
        relative_step (`bool`, *optional*, defaults to `True`):
            If True, time-dependent learning rate is computed instead of external learning rate
        warmup_init (`bool`, *optional*, defaults to `False`):
            Time-dependent learning rate computation depends on whether warm-up initialization is being used

    This implementation handles low-precision (FP16, bfloat16) values, but it has not been thoroughly tested.

    Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):

        - Training without LR warmup or clip_threshold is not recommended.

           - use scheduled LR warm-up to fixed LR
           - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235)
        - Disable relative updates
        - Use scale_parameter=False
        - Additional optimizer operations like gradient clipping should not be used alongside Adafactor

    Example:

    ```python
    Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
    ```

    Others reported the following combination to work well:

    ```python
    Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    ```

    When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`]
    scheduler as following:

    ```python
    from transformers.optimization import Adafactor, AdafactorSchedule

    optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    lr_scheduler = AdafactorSchedule(optimizer)
    trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
    ```

    Usage:

    ```python
    # replace AdamW with Adafactor
    optimizer = Adafactor(
        model.parameters(),
        lr=1e-3,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        relative_step=False,
        scale_parameter=False,
        warmup_init=False,
    )
    ```"""

    def __init__(
        self,
        params,
        lr=None,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        scale_parameter=True,
        relative_step=True,
        warmup_init=False,
    ):
        require_version("torch>=1.5.0")
        if lr is not None and relative_step:
            raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
        if warmup_init and not relative_step:
            raise ValueError("`warmup_init=True` requires `relative_step=True`")

        defaults = {
            "lr": lr,
            "eps": eps,
            "clip_threshold": clip_threshold,
            "decay_rate": decay_rate,
            "beta1": beta1,
            "weight_decay": weight_decay,
            "scale_parameter": scale_parameter,
            "relative_step": relative_step,
            "warmup_init": warmup_init,
        }
        super().__init__(params, defaults)

    @staticmethod
    def _get_lr(param_group, param_state):
        rel_step_sz = param_group["lr"]
        if param_group["relative_step"]:
            min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
        param_scale = 1.0
        if param_group["scale_parameter"]:
            param_scale = max(param_group["eps"][1], param_state["RMS"])
        return param_scale * rel_step_sz

    @staticmethod
    def _get_options(param_group, param_shape):
        factored = len(param_shape) >= 2
        use_first_moment = param_group["beta1"] is not None
        return factored, use_first_moment

    @staticmethod
    def _rms(tensor):
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    @staticmethod
    def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError("Adafactor does not support sparse gradients.")

                state = self.state[p]
                grad_shape = grad.shape

                factored, use_first_moment = self._get_options(group, grad_shape)
                # State initialization
                if len(state) == 0:
                    state["step"] = 0

                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(grad)
                    if factored:
                        state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
                        state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state["exp_avg_sq"] = torch.zeros_like(grad)

                    state["RMS"] = 0
                else:
                    if use_first_moment:
                        state["exp_avg"] = state["exp_avg"].to(grad)
                    if factored:
                        state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
                        state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
                    else:
                        state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)

                p_data_fp32 = p
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                state["step"] += 1
                state["RMS"] = self._rms(p_data_fp32)
                lr = self._get_lr(group, state)

                beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
                update = (grad**2) + group["eps"][0]
                if factored:
                    exp_avg_sq_row = state["exp_avg_sq_row"]
                    exp_avg_sq_col = state["exp_avg_sq_col"]

                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))

                    # Approximation of the exponential moving average of the squared gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state["exp_avg_sq"]

                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
                    update = exp_avg_sq.rsqrt().mul_(grad)

                update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
                update.mul_(lr)

                if use_first_moment:
                    exp_avg = state["exp_avg"]
                    exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
                    update = exp_avg

                if group["weight_decay"] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr))

                p_data_fp32.add_(-update)

                if p.dtype in {torch.float16, torch.bfloat16}:
                    p.copy_(p_data_fp32)

        return loss


class AdafactorSchedule(LambdaLR):
    """
    Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
    for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.

    It returns `initial_lr` during startup and the actual `lr` during stepping.
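
    Example (an illustrative sketch; the tiny model is a placeholder and mirrors the Adafactor usage shown above):

    ```python
    import torch

    optimizer = Adafactor(
        torch.nn.Linear(8, 2).parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None
    )
    lr_scheduler = AdafactorSchedule(optimizer)
    ```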
    """

    def __init__(self, optimizer, initial_lr=0.0):
        def lr_lambda(_):
            return initial_lr

        for group in optimizer.param_groups:
            group["initial_lr"] = initial_lr
        super().__init__(optimizer, lr_lambda)
        for group in optimizer.param_groups:
            del group["initial_lr"]

    def get_lr(self):
        opt = self.optimizer
        lrs = [
            opt._get_lr(group, opt.state[group["params"][0]])
            for group in opt.param_groups
            if group["params"][0].grad is not None
        ]
        if len(lrs) == 0:
            lrs = self.base_lrs  # if called before stepping
        return lrs


def get_adafactor_schedule(optimizer, initial_lr=0.0):
    """
    Get a proxy schedule for [`~optimization.Adafactor`]

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        initial_lr (`float`, *optional*, defaults to 0.0):
            Initial lr

    Return:
        [`~optimization.Adafactor`] proxy schedule object.


    """
    return AdafactorSchedule(optimizer, initial_lr)