"""
 PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
 part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
"""

import math
from typing import Dict, List, Optional, Set, Tuple, Union

import numpy as np
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import get_activation
from ...configuration_utils import PretrainedConfig
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
    BaseModelOutput,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_distilbert import DistilBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
_CONFIG_FOR_DOC = "DistilBertConfig"

DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "distilbert-base-uncased",
    "distilbert-base-uncased-distilled-squad",
    "distilbert-base-cased",
    "distilbert-base-cased-distilled-squad",
    "distilbert-base-german-cased",
    "distilbert-base-multilingual-cased",
    "distilbert-base-uncased-finetuned-sst-2-english",
]


def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
    if is_deepspeed_zero3_enabled():
        import deepspeed

        with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
            if torch.distributed.get_rank() == 0:
                _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
    else:
        _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)


def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
    position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
    out.requires_grad = False
    out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
    out.detach_()
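

# Illustrative sketch (not part of the original module): the table built by `_create_sinusoidal_embeddings`
# follows sin/cos of pos / 10000^(2 * (j // 2) / dim), with sine on even and cosine on odd feature indices.
# The helper name and the small sizes below are arbitrary and only demonstrate the expected shape.
def _example_sinusoidal_table():  # hypothetical helper, for illustration only
    weight = torch.empty(6, 4)  # (n_pos, dim)
    _create_sinusoidal_embeddings(n_pos=6, dim=4, out=weight)
    assert weight.shape == (6, 4)
    assert not weight.requires_grad  # the table is fixed, not trained
    return weight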


class Embeddings(nn.Module):
    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
        if config.sinusoidal_pos_embds:
            create_sinusoidal_embeddings(
                n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
            )

        self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
        self.dropout = nn.Dropout(config.dropout)
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Parameters:
            input_ids (torch.Tensor):
                torch.tensor(bs, max_seq_length) The token ids to embed.
            input_embeds (*optional*, torch.Tensor):
                The pre-computed word embeddings. Can only be passed if the input ids are `None`.


        Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
        embeddings)
        """
        if input_ids is not None:
            input_embeds = self.word_embeddings(input_ids)  # (bs, max_seq_length, dim)

        seq_length = input_embeds.size(1)

        # Use the registered `position_ids` buffer when it exists, otherwise build position ids on the fly
        if hasattr(self, "position_ids"):
            position_ids = self.position_ids[:, :seq_length]
        else:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # (max_seq_length)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # (bs, max_seq_length)

        position_embeddings = self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)

        embeddings = input_embeds + position_embeddings  # (bs, max_seq_length, dim)
        embeddings = self.LayerNorm(embeddings)  # (bs, max_seq_length, dim)
        embeddings = self.dropout(embeddings)  # (bs, max_seq_length, dim)
        return embeddings


class MultiHeadSelfAttention(nn.Module):
    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = nn.Dropout(p=config.attention_dropout)

        if self.dim % self.n_heads != 0:
            raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")

        self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)

        self.pruned_heads: Set[int] = set()
        self.attention_head_size = self.dim // self.n_heads

    def prune_heads(self, heads: List[int]):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = self.attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length)

        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        bs, q_length, dim = query.size()
        k_length = key.size(1)

        dim_per_head = self.dim // self.n_heads
        mask_reshp = (bs, 1, 1, k_length)

        def shape(x: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x: torch.Tensor) -> torch.Tensor:
            """group heads"""
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)

        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, q_length, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
        mask = (mask == 0).view(mask_reshp).expand_as(scores)  # (bs, n_heads, q_length, k_length)
        scores = scores.masked_fill(
            mask, torch.tensor(torch.finfo(scores.dtype).min)
        )  # (bs, n_heads, q_length, k_length)

        weights = nn.functional.softmax(scores, dim=-1)  # (bs, n_heads, q_length, k_length)
        weights = self.dropout(weights)  # (bs, n_heads, q_length, k_length)

        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask

        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)

        if output_attentions:
            return (context, weights)
        else:
            return (context,)
tj|j	|jd| _t|j| _d S )Nrf   r   ri   )rC   rD   r   rM   rN   chunk_size_feed_forwardseq_len_dimrl   r#   Z
hidden_dimlin1lin2r   
activationrQ   rS   r*   r+   rD      s    
zFFN.__init__)inputrW   c                 C   s   t | j| j| j|S N)r   ff_chunkr   r   )rR   r   r*   r*   r+   r_      s    zFFN.forwardc                 C   s,   |  |}| |}| |}| |}|S r   )r   r   r   rN   )rR   r   r   r*   r*   r+   r      s
    



zFFN.ff_chunk)
r`   ra   rb   r   rD   r'   rc   r_   r   rd   r*   r*   rS   r+   r      s   	r   c                       sR   e Zd Zed fddZd
ejeej eej ee	ejdf ddd	Z
  ZS )TransformerBlockr:   c                    sp   t    |j|j dkr4td|j d|j dt|| _tj|jdd| _	t
|| _tj|jdd| _d S )Nr   zconfig.n_heads z must divide config.dim rh   r=   )Znormalized_shaper?   )rC   rD   r#   rj   rk   re   	attentionr   rL   sa_layer_normr   ffnoutput_layer_normrQ   rS   r*   r+   rD   	  s    


zTransformerBlock.__init__NF.)r   	attn_maskr}   r~   rW   c           	      C   s   | j ||||||d}|r$|\}}n(t|tkrDtdt| d|d }| || }| |}| || }|f}|r|f| }|S )ae  
        Parameters:
            x: torch.tensor(bs, seq_length, dim)
            attn_mask: torch.tensor(bs, seq_length)

        Returns:
            sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
            torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
        """
        # Self-Attention
        sa_output = self.attention(
            query=x,
            key=x,
            value=x,
            mask=attn_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        if output_attentions:
            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:
            if type(sa_output) != tuple:
                raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
            sa_output = sa_output[0]
        sa_output = self.sa_layer_norm(sa_output + x)  # (bs, seq_length, dim)

        # Feed Forward Network
        ffn_output = self.ffn(sa_output)  # (bs, seq_length, dim)
        ffn_output = self.output_layer_norm(ffn_output + sa_output)  # (bs, seq_length, dim)

        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output
ejeej eej eeee e	e
eejdf f ddd	Z  ZS )Transformerr:   c                    s<   t     j| _t fddt jD | _d| _d S )Nc                    s   g | ]}t  qS r*   )r   )r0   _r:   r*   r+   r3   F  s     z(Transformer.__init__.<locals>.<listcomp>F)rC   rD   Zn_layersr   Z
ModuleListr4   layergradient_checkpointingrQ   rS   r:   r+   rD   C  s    
 zTransformer.__init__NF.)r   r   r}   r~   output_hidden_statesreturn_dictrW   c                    s  |rdnd} rdnd}|}	t | jD ]\}
}|r<||	f }| jrr| jrr fdd}tjj|||	|||
 }n||	|||
  }|d }	 rt|dkrtdt| |d }||f }q&t|d	kr&td
t| q&|r||	f }|st	dd |	||fD S t
|	||dS )a  
        Parameters:
            x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
            attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.

        Returns:
            hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top)
            layer all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
                Tuple of length n_layers with the hidden states from each layer.
                Optional: only if output_hidden_states=True
            all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
                Tuple of length n_layers with the attention weights from each layer
                Optional: only if output_attentions=True
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_state = x
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_state,
                    attn_mask,
                    head_mask[i],
                )
            else:
                layer_outputs = layer_module(
                    hidden_state,
                    attn_mask,
                    head_mask[i],
                    output_attentions,
                )

            hidden_state = layer_outputs[-1]

            if output_attentions:
                if len(layer_outputs) != 2:
                    raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
                attentions = layer_outputs[0]
                all_attentions = all_attentions + (attentions,)
            else:
                if len(layer_outputs) != 1:
                    raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
        )
dddZdd	d
ZdS )DistilBertPreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    N
distilbertTr   c                 C   s   t |tjr:|jjjd| jjd |jdk	r|jj	  nft |tj
rz|jjjd| jjd |jdk	r|jj|j 	  n&t |tjr|jj	  |jjd dS )zInitialize the weights.g        )ZmeanZstdNg      ?)
isinstancer   rl   rK   dataZnormal_r;   Zinitializer_rangeZbiasZzero_rE   r<   rL   Zfill_)rR   r   r*   r*   r+   _init_weights  s    

z'DistilBertPreTrainedModel._init_weightsFc                 C   s   t |tr||_d S r   )r   r   r   )rR   r   r{   r*   r*   r+   _set_gradient_checkpointing  s    
z5DistilBertPreTrainedModel._set_gradient_checkpointing)F)r`   ra   rb   __doc__r    config_classZload_tf_weightsZbase_model_prefixZsupports_gradient_checkpointingr   Moduler   r   r*   r*   r*   r+   r     s   r   aD  

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

DISTILBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class DistilBertModel(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.embeddings = Embeddings(config)  # Embeddings
        self.transformer = Transformer(config)  # Encoder

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.embeddings.position_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings

        # no resizing needs to be done if the length stays the same
        if num_position_embeds_diff == 0:
            return

        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings

        old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()

        self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)

        if self.config.sinusoidal_pos_embds:
            create_sinusoidal_embeddings(
                n_pos=self.config.max_position_embeddings,
                dim=self.config.dim,
                out=self.embeddings.position_embeddings.weight,
            )
        else:
            with torch.no_grad():
                if num_position_embeds_diff > 0:
                    self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
                        old_position_embeddings_weight
                    )
                else:
                    self.embeddings.position_embeddings.weight = nn.Parameter(
                        old_position_embeddings_weight[:num_position_embeds_diff]
                    )
        # move position_embeddings to correct device
        self.embeddings.position_embeddings.to(self.device)

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings: nn.Embedding):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.transformer.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)  # (bs, seq_length)

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embeddings = self.embeddings(input_ids, inputs_embeds)  # (bs, seq_length, dim)

        return self.transformer(
            x=embeddings,
            attn_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


@add_start_docstrings(
    """DistilBert Model with a `masked language modeling` head on top.""",
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
    _tied_weights_keys = ["vocab_projector.weight"]

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.activation = get_activation(config.activation)

        self.distilbert = DistilBertModel(config)
        self.vocab_transform = nn.Linear(config.dim, config.dim)
        self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
        self.vocab_projector = nn.Linear(config.dim, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

        self.mlm_loss_fct = nn.CrossEntropyLoss()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.vocab_projector

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.vocab_projector = new_embeddings

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        dlbrt_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = dlbrt_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.activation(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)  # (bs, seq_length, vocab_size)

        mlm_loss = None
        if labels is not None:
            mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (prediction_logits,) + dlbrt_output[1:]
            return ((mlm_loss,) + output) if mlm_loss is not None else output

        return MaskedLMOutput(
            loss=mlm_loss,
            logits=prediction_logits,
            hidden_states=dlbrt_output.hidden_states,
            attentions=dlbrt_output.attentions,
        )
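

# Illustrative usage sketch (not part of the original module): recovers a masked token with the MLM head.
# Checkpoint as documented above; the helper name is hypothetical.
def _example_masked_lm():  # hypothetical helper, for illustration only
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, seq_length, vocab_size)
    mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
    return tokenizer.decode(logits[0, mask_index].argmax(dim=-1))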


@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.distilbert = DistilBertModel(config)
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, config.num_labels)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output)  # (bs, dim)
        logits = self.classifier(pooled_output)  # (bs, num_labels)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )


@add_start_docstrings(
    """
    DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD
    (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.distilbert = DistilBertModel(config)
        self.qa_outputs = nn.Linear(config.dim, config.num_labels)
        if config.num_labels != 2:
            raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")

        self.dropout = nn.Dropout(config.qa_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = distilbert_output[0]  # (bs, max_query_len, dim)

        hidden_states = self.dropout(hidden_states)  # (bs, max_query_len, dim)
        logits = self.qa_outputs(hidden_states)  # (bs, max_query_len, 2)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()  # (bs, max_query_len)
        end_logits = end_logits.squeeze(-1).contiguous()  # (bs, max_query_len)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + distilbert_output[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )


@add_start_docstrings(
    """
    DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.distilbert = DistilBertModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.distilbert(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.distilbert = DistilBertModel(config)
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, 1)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`)
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(
        DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
        >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> choice0 = "It is eaten with a fork and a knife."
        >>> choice1 = "It is eaten while held in the hand."
        >>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is correct (according to Wikipedia ;)), batch size 1

        >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
        >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels)  # batch size is 1

        >>> # the linear classifier still needs to be trained
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.distilbert(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_state = outputs[0]  # (bs * num_choices, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs * num_choices, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs * num_choices, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs * num_choices, dim)
        pooled_output = self.dropout(pooled_output)  # (bs * num_choices, dim)
        logits = self.classifier(pooled_output)  # (bs * num_choices, 1)

        reshaped_logits = logits.view(-1, num_choices)  # (bs, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )