# coding=utf-8
"""PyTorch OpenAI GPT-2 model."""

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gpt2 import GPT2Config


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "gpt2"
_CONFIG_FOR_DOC = "GPT2Config"

GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "gpt2",
    "gpt2-medium",
    "gpt2-large",
    "gpt2-xl",
    "distilgpt2",
]


def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load tf checkpoints in a pytorch model"""
    try:
        import re

        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from the TF checkpoint
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip the "model/" prefix
        name = name.split("/")
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
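# Illustrative sketch (an addition, not part of the original file): the `GPT2Attention` module defined below
# computes masked, scaled dot-product self-attention. This helper re-derives that computation with plain tensor
# ops; the function name and the assumed shapes (batch, num_heads, seq_len, head_dim) are illustrative only.
def _causal_attention_sketch(query, key, value):
    # query/key/value: (batch, num_heads, seq_len, head_dim)
    seq_len = query.size(-2)
    # Scale scores by sqrt(head_dim), as `scale_attn_weights` does in GPT2Attention._attn.
    scores = torch.matmul(query, key.transpose(-1, -2)) / (value.size(-1) ** 0.5)
    # Lower-triangular mask: position i may only attend to positions <= i.
    causal_mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=query.device))
    scores = scores.masked_fill(~causal_mask, torch.finfo(scores.dtype).min)
    weights = torch.nn.functional.softmax(scores, dim=-1)
    return torch.matmul(weights, value)  # (batch, num_heads, seq_len, head_dim)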
rK   c                       s¶   e Zd Zd‡ fdd„	Zdd„ Zddd„Zdd	d
„Zdd„ Zdd„ Zde	e
ej  e	e
ej  e	ej e	ej e	ej e	ej e	e e	e e
eeje
ej f df dœ	dd„Z‡  ZS )ÚGPT2AttentionFNc                    sF  t ƒ  ¡  |j}| jdt tj||ftjd¡ dd||¡dd | jdt 	d¡dd |j
| _|j| _| j| j | _| j| _| j| j | jkrªtd| j› d	| j› d
ƒ‚|j| _|| _|j| _|| _|j| _| jrútd| j | jƒ| _t| j| jƒ| _ntd| j | jƒ| _t| j| jƒ| _t |j¡| _t |j¡| _t ƒ | _!d S )Nr'   ©Údtyper   F)Ú
persistentZmasked_biasg     ˆÃÀz=`embed_dim` must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).r*   r   )"ÚsuperÚ__init__Úmax_position_embeddingsZregister_bufferr?   ZtrilÚonesÚboolÚviewÚtensorÚhidden_sizeÚ	embed_dimZnum_attention_headsÚ	num_headsÚhead_dimÚ
split_sizer=   Úscale_attn_weightsÚis_cross_attentionÚscale_attn_by_inverse_layer_idxÚ	layer_idxÚreorder_and_upcast_attnr   Úc_attnÚq_attnÚc_projr   ÚDropoutZ
attn_pdropÚattn_dropoutÚresid_pdropÚresid_dropoutÚsetÚpruned_heads)ÚselfrB   r]   r_   Zmax_positions©Ú	__class__rI   rJ   rQ   |   sD    
   ÿûÿzGPT2Attention.__init__c                 C   sª   t |ƒdkrd S t|| j| j| jƒ\}}t ||| j |d| j  g¡}t| j	|dd| _	t| j
|dd| _
| j| j | jt |ƒ  | _| jt |ƒ | _| j |¡| _d S )Nr   r*   r   ©Údim)r:   r   rY   rZ   ri   r?   Úcatr[   r   ra   rc   Úunion)rj   ÚheadsÚindexZ
index_attnrI   rI   rJ   Úprune_heads§   s     zGPT2Attention.prune_headsc                 C   s2  t  || dd¡¡}| jr>|t jg | d¡d |j|jd }| jrV|t	| j
d ƒ }| jsÖ| d¡| d¡ }}| jd d …d d …|| |…d |…f }	t  |j¡j}
t jg |
|jd |j¡}
t  |	| |j¡|
¡}|d k	ræ|| }tjj|dd}| |j¡}|  |¡}|d k	r|| }t  ||¡}||fS )Néÿÿÿÿéþÿÿÿç      à?©rN   Údevicer   rM   rm   )r?   ÚmatmulÚ	transposer\   ÚfullÚsizerN   rx   r^   Úfloatr_   r]   r'   ÚfinfoÚminÚtoÚwherer   Ú
functionalÚsoftmaxÚtypere   )rj   ÚqueryÚkeyÚvalueÚattention_maskÚ	head_maskÚattn_weightsÚquery_lengthÚ
key_lengthÚcausal_maskÚ
mask_valueÚattn_outputrI   rI   rJ   Ú_attn¶   s2       ÿ&

zGPT2Attention._attnc              	   C   sÈ  |  ¡ \}}}}	|  ¡ \}
}
}}
tj|| ||tj|jd}d}| jr\|t|  d¡ƒd  }| jrt|t| jd ƒ }t	ddX | 
d||	¡| dd¡ 
d|	|¡ }}tj|| ¡ | ¡ d	|d
}| 
||||¡}W 5 Q R X | jsT|  d¡|  d¡ }}| jd d …d d …|| |…d |…f }t |j¡j}tj||jd |j¡}t |||¡}|d k	rf|| }tjj|dd}|jtjkrŒtdƒ‚| |j¡}|  |¡}|d k	r´|| }t ||¡}||fS )Nrw   ç      ð?rt   rv   r   F)Úenabledru   r   )ÚbetaÚalpharM   rm   zDError with upcasting, attn_weights does not have dtype torch.float32)r|   r?   ÚemptyZfloat32rx   r\   r}   r^   r_   r   Zreshaperz   Zbaddbmmr]   r'   r~   rN   r   rV   r€   r   r   r‚   rƒ   ÚRuntimeErrorr„   re   ry   )rj   r…   r†   r‡   rˆ   r‰   ZbszrY   Z	q_seq_lenZdkÚ_Z	k_seq_lenrŠ   Zscale_factorÚqÚkr‹   rŒ   r   rŽ   r   rI   rI   rJ   Ú_upcast_and_reordered_attnÞ   s:    &&


z(GPT2Attention._upcast_and_reordered_attnc                 C   s2   |  ¡ dd… ||f }| |¡}| dddd¡S )zJ
        Splits hidden_size dim into attn_head_size and num_heads
        Nrt   r   r*   r   r   )r|   rU   Úpermute©rj   rV   rY   Zattn_head_sizeZ	new_shaperI   rI   rJ   Ú_split_heads  s    
zGPT2Attention._split_headsc                 C   s8   |  dddd¡ ¡ }| ¡ dd… || f }| |¡S )zS
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        r   r*   r   r   Nru   )r›   Ú
contiguousr|   rU   rœ   rI   rI   rJ   Ú_merge_heads  s    zGPT2Attention._merge_heads.©	Úhidden_statesÚ
layer_pastrˆ   r‰   Úencoder_hidden_statesÚencoder_attention_maskÚ	use_cacheÚoutput_attentionsÚreturnc	                 C   sV  |d k	rDt | dƒstdƒ‚|  |¡}	|  |¡j| jdd\}
}|}n|  |¡j| jdd\}	}
}|  |	| j| j¡}	|  |
| j| j¡}
|  || j| j¡}|d k	rÊ|\}}t	j
||
fdd}
t	j
||fdd}|dkrÜ|
|f}nd }| jrþ|  |	|
|||¡\}}n|  |	|
|||¡\}}|  || j| j¡}|  |¡}|  |¡}||f}|rR||f7 }|S )Nrb   z§If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`.r*   rm   ru   T)Úhasattrr=   rb   ra   r7   r[   r   rY   rZ   r?   ro   r`   rš   r   rŸ   rc   rg   )rj   r¡   r¢   rˆ   r‰   r£   r¤   r¥   r¦   r…   r†   r‡   Zpast_keyZ
past_valueZpresentr   rŠ   ÚoutputsrI   rI   rJ   Úforward"  s:    
ÿ




zGPT2Attention.forward)FN)NN)NN)NNNNNFF)Ú__name__Ú
__module__Ú__qualname__rQ   rs   r   rš   r   rŸ   r   r   r?   ÚFloatTensorÚTensorrT   r   rª   Ú__classcell__rI   rI   rk   rJ   rL   {   s0   +
(
4       ÷örL   c                       s8   e Zd Z‡ fdd„Zeeej  ejdœdd„Z‡  Z	S )ÚGPT2MLPc                    sF   t ƒ  ¡  |j}t||ƒ| _t||ƒ| _t|j | _t	 
|j¡| _d S ©N)rP   rQ   rW   r   Úc_fcrc   r   Zactivation_functionÚactr   rd   rf   Údropout)rj   Zintermediate_sizerB   rX   rk   rI   rJ   rQ   Y  s    
zGPT2MLP.__init__)r¡   r§   c                 C   s,   |   |¡}|  |¡}|  |¡}|  |¡}|S r²   )r³   r´   rc   rµ   )rj   r¡   rI   rI   rJ   rª   a  s
    



zGPT2MLP.forward)
r«   r¬   r­   rQ   r   r   r?   r®   rª   r°   rI   rI   rk   rJ   r±   X  s   r±   c                       s˜   e Zd Zd	‡ fdd„	Zd
eeej  eeej  eej eej eej eej ee	 ee	 e
eej eeejeejdf f  f dœ	dd„Z‡  ZS )Ú	GPT2BlockNc                    s’   t ƒ  ¡  |j}|jd k	r |jnd| }tj||jd| _t||d| _	tj||jd| _
|jr‚t|d|d| _tj||jd| _t||ƒ| _d S )Né   ©Zeps©r_   T)r]   r_   )rP   rQ   rW   Zn_innerr   Ú	LayerNormÚlayer_norm_epsilonÚln_1rL   ÚattnÚln_2Úadd_cross_attentionÚcrossattentionÚln_cross_attnr±   Úmlp)rj   rB   r_   rW   Z	inner_dimrk   rI   rJ   rQ   j  s    
zGPT2Block.__init__F.r    c	                 C   sì   |}	|   |¡}| j||||||d}
|
d }|
dd … }||	 }|d k	r¦t| dƒsbtd| › dƒ‚|}	|  |¡}| j||||||d}|d }|	| }||dd …  }|}	|  |¡}|  |¡}|	| }|rÖ|f| }n|f|dd …  }|S )	N)r¢   rˆ   r‰   r¥   r¦   r   r   rÀ   z'If `encoder_hidden_states` are passed, z` has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`)rˆ   r‰   r£   r¤   r¦   r*   )r¼   r½   r¨   r=   rÁ   rÀ   r¾   rÂ   )rj   r¡   r¢   rˆ   r‰   r£   r¤   r¥   r¦   ZresidualZattn_outputsr   r©   Zcross_attn_outputsZfeed_forward_hidden_statesrI   rI   rJ   rª   y  sN    
ú

ÿ
ú

zGPT2Block.forward)N)NNNNNFF)r«   r¬   r­   rQ   r   r   r?   r®   r¯   rT   r   rª   r°   rI   rI   rk   rJ   r¶   i  s&          ÷(ör¶   c                       sP   e Zd ZdZeZeZdZdZ	dZ
dgZdZ‡ fdd„Zdd	„ Zddd„Z‡  ZS )ÚGPT2PreTrainedModelz†
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    ÚtransformerTr¶   Úpast_key_valuesc                    s   t ƒ j||Ž d S r²   )rP   rQ   )rj   ÚinputsÚkwargsrk   rI   rJ   rQ   Å  s    zGPT2PreTrainedModel.__init__c                 C   sè   t |tjtfƒr>|jjjd| jjd |j	dk	r¤|j	j 
¡  nft |tjƒr~|jjjd| jjd |jdk	r¤|jj|j  
¡  n&t |tjƒr¤|j	j 
¡  |jj d¡ | ¡ D ]6\}}|dkr¬|jjd| jjt d| jj ¡ d q¬dS )zInitialize the weights.g        )ZmeanZstdNr‘   zc_proj.weightr*   )Ú
isinstancer   ÚLinearr   r%   r@   Znormal_rB   Zinitializer_ranger'   Zzero_Ú	EmbeddingZpadding_idxrº   Zfill_Znamed_parametersÚmathÚsqrtÚn_layer)rj   ÚmodulerE   ÚprI   rI   rJ   Ú_init_weightsÈ  s    

z!GPT2PreTrainedModel._init_weightsFc                 C   s   t |tƒr||_d S r²   )rÈ   Ú	GPT2ModelÚgradient_checkpointing)rj   rÎ   r‡   rI   rI   rJ   Ú_set_gradient_checkpointingã  s    
z/GPT2PreTrainedModel._set_gradient_checkpointing)F)r«   r¬   r­   Ú__doc__r    Úconfig_classrK   Zload_tf_weightsZbase_model_prefixZis_parallelizableZsupports_gradient_checkpointingZ_no_split_modulesZ_skip_keys_device_placementrQ   rÐ   rÓ   r°   rI   rI   rk   rJ   rÃ   ·  s   rÃ   c                   @   s˜   e Zd ZU dZdZeej ed< dZ	eej ed< dZ
ejed< dZejed< dZeeeej   ed< dZeeej  ed< dZeeej  ed	< dS )
ÚGPT2DoubleHeadsModelOutputa…  
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
            Multiple choice classification loss.
        logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)`).

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            GPT2Attentions weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    NÚlossÚmc_lossÚlogitsÚ	mc_logitsrÅ   r¡   Ú
attentions)r«   r¬   r­   rÔ   r×   r   r?   r®   Ú__annotations__rØ   rÙ   rÚ   rÅ   r   r¡   rÛ   rI   rI   rI   rJ   rÖ   è  s   
rÖ   a>  

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`GPT2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPT2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
            `past_key_values`. In other words, the `attention_mask` always has to have the length:
            `len(past_key_values) + len(input_ids)`

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is a subject to change at a moment's notice.

    Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
    it will evenly distribute blocks across all devices.

    Args:
        device_map (`Dict[int, list]`, optional, defaults to None):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
            following number of attention modules:

                - gpt2: 12
                - gpt2-medium: 24
                - gpt2-large: 36
                - gpt2-xl: 48

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
    model = GPT2LMHeadModel.from_pretrained("gpt2-xl")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
        1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
        2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
        3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
    }
    model.parallelize(device_map)
    ```
"""

DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to cpu from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with gpt2-large:
    model = GPT2LMHeadModel.from_pretrained("gpt2-large")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6, 7],
        1: [8, 9, 10, 11, 12, 13, 14, 15],
        2: [16, 17, 18, 19, 20, 21, 22, 23],
        3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""


@add_start_docstrings(
    "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
    GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        # Deprecated: distributes the blocks in `self.h` across the available GPUs according to `device_map`,
        # keeping the embeddings on the first device and the final LayerNorm on the last one.
        warnings.warn(
            "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance"
            " {'h.0': 0, 'h.1': 1, ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        self.wpe = self.wpe.to(self.first_device)
        # Load the blocks onto their assigned devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to the last device
        self.ln_f = self.ln_f.to(self.last_device)

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        self.wpe = self.wpe.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        # Embeds the inputs (token + position [+ token type] embeddings), builds the extended attention mask,
        # runs the stack of GPT2Blocks with optional cached key/values, head masking, cross-attention and
        # gradient checkpointing (which disables `use_cache`), applies the final LayerNorm, and returns either
        # a plain tuple or a BaseModelOutputWithPastAndCrossAttentions.
        ...


@add_start_docstrings(
    """
    The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        # Deprecated model-parallel helper: warns with a FutureWarning, builds/validates the device map,
        # calls `self.transformer.parallelize(...)` and moves `lm_head` to the transformer's first device.
        ...

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        # Deprecated model-parallel helper: moves the transformer and `lm_head` back to CPU and empties the
        # CUDA cache.
        ...

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        # Once `past_key_values` is available, keeps only the last token (and the matching token_type_ids),
        # derives `position_ids` from the attention mask on the fly for batch generation, and passes
        # `inputs_embeds` only on the first generation step.
        ...

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # move labels to the correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
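# Hedged usage sketch (an addition, not original code): how the `use_cache`/`past_key_values` mechanism of
# `GPT2LMHeadModel.forward` above lets decoding append one token at a time instead of re-encoding the whole
# prefix. "gpt2" is the standard public checkpoint; the function name and variables are illustrative.
def _incremental_decoding_sketch(prompt_ids):
    # prompt_ids: LongTensor of shape (batch, prompt_len)
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    model.eval()
    with torch.no_grad():
        out = model(prompt_ids, use_cache=True)
        # tuple (one per layer) of (key, value), each of shape (batch, num_heads, prompt_len, head_dim)
        past = out.past_key_values
        next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
        # Feed only the new token plus the cache; attention still sees the full prefix through `past`.
        out = model(next_token, past_key_values=past, use_cache=True)
    return next_token, out.logits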
@add_start_docstrings(
    """
The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings, the classification head takes as input the input of a specified classification token index in the
input sequence).
""",
    GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        config.num_labels = 1
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        # Deprecated model-parallel helper; mirrors `GPT2LMHeadModel.parallelize` and additionally moves the
        # multiple-choice head to the transformer's first device.
        ...

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        # Deprecated model-parallel helper; mirrors `GPT2LMHeadModel.deparallelize`.
        ...

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        # Same contract as `GPT2LMHeadModel.prepare_inputs_for_generation`: keep only the last token (and the
        # matching token_type_ids/position_ids) once `past_key_values` is available.
        ...

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        mc_token_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        mc_labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]:
        r"""
        mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
            Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
            1]`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
            `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`
        mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)

        Return:

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel

        >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
        >>> model = GPT2DoubleHeadsModel.from_pretrained("gpt2")

        >>> # Add a [CLS] to the vocabulary (we should train it also!)
        >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
        >>> # Update the model embeddings with the new vocabulary size
        >>> embedding_layer = model.resize_token_embeddings(len(tokenizer))

        >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        >>> encoded_choices = [tokenizer.encode(s) for s in choices]
        >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        >>> mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

        >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
        >>> lm_logits = outputs.logits
        >>> mc_logits = outputs.mc_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        mc_loss = None
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
        lm_loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits, mc_logits) + transformer_outputs[1:]
            if mc_loss is not None:
                output = (mc_loss,) + output
            return ((lm_loss,) + output) if lm_loss is not None else output

        return GPT2DoubleHeadsModelOutput(
            loss=lm_loss,
            mc_loss=mc_loss,
            logits=lm_logits,
            mc_logits=mc_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
@add_start_docstrings(
    """
    The GPT2 Model transformer with a sequence classification head on top (linear layer).

    [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint="microsoft/DialogRPT-updown",
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        assert (
            self.config.pad_token_id is not None or batch_size == 1
        ), "Cannot handle batch sizes > 1 if no padding token is defined."
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # Index of the last non-padding token in each row
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
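# Hedged sketch (an addition, not original code): the pooling rule used by `GPT2ForSequenceClassification`
# above, which takes the hidden state of the last non-padding token per row when `pad_token_id` is set.
# The function name and tensor shapes are illustrative only.
def _last_token_pooling_sketch(logits, input_ids, pad_token_id):
    # logits: (batch, seq_len, num_labels); input_ids: (batch, seq_len)
    batch_size = input_ids.shape[0]
    # argmax over the "is padding" comparison returns the first pad position (or 0 if a row has no padding,
    # which then wraps to the final position); the token before it is the last real token.
    sequence_lengths = torch.eq(input_ids, pad_token_id).long().argmax(-1) - 1
    return logits[torch.arange(batch_size, device=logits.device), sequence_lengths]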
@add_start_docstrings(
    """
    GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForTokenClassification(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = GPT2Model(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint="brad1141/gpt2-finetuned-comp2",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_loss=0.25,
        expected_output=[
            "Lead",
            "Lead",
            "Lead",
            "Position",
            "Lead",
            "Lead",
            "Lead",
            "Lead",
            "Lead",
            "Lead",
            "Lead",
            "Lead",
        ],
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
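# Hedged usage sketch (an addition, not original code): turning `GPT2ForTokenClassification` logits into
# per-token label strings. The tokenizer/model are assumed to be loaded elsewhere; names are illustrative.
def _token_classification_sketch(tokenizer, model, text):
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, seq_len, num_labels)
    predicted_ids = logits.argmax(dim=-1)[0]
    # Map ids back to label names via the model config, one label per input token.
    return [model.config.id2label[int(i)] for i in predicted_ids]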
@add_start_docstrings(
    """
    The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
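# Hedged usage sketch (an addition, not original code): decoding a span from `GPT2ForQuestionAnswering`
# start/end logits. The tokenizer/model are assumed to be loaded elsewhere; names are illustrative.
def _qa_span_sketch(tokenizer, model, question, context):
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the most likely start and end token positions and decode the tokens in between.
    start_index = int(outputs.start_logits.argmax(dim=-1))
    end_index = int(outputs.end_logits.argmax(dim=-1))
    answer_ids = inputs["input_ids"][0, start_index : end_index + 1]
    return tokenizer.decode(answer_ids)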