""" PyTorch TrOCR decoder model (based on RoBERTa)."""

import copy
import math
from typing import Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, logging, replace_return_docstrings
from .configuration_trocr import TrOCRConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "TrOCRConfig"
_CHECKPOINT_FOR_DOC = "microsoft/trocr-base-handwritten"

TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/trocr-base-handwritten",
]


def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
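

# A minimal sketch of how the two mask helpers above are typically combined into the
# additive decoder self-attention mask (it mirrors what the decoder below does when it
# prepares its attention mask). The helper name `_example_decoder_attention_mask` is
# illustrative only and is not used by the model code.
def _example_decoder_attention_mask(attention_mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    bsz, seq_len = attention_mask.shape
    # causal part: 0 where attending is allowed, a large negative value elsewhere
    causal_mask = _make_causal_mask((bsz, seq_len), dtype, attention_mask.device)
    # padding part: `[bsz, seq_len]` broadcast to `[bsz, 1, seq_len, seq_len]`
    padding_mask = _expand_mask(attention_mask, dtype, tgt_len=seq_len)
    # the two additive masks are summed and later added onto the raw attention scores
    return causal_mask + padding_mask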


class TrOCRLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # the embedding ids are offset by 2, so the table is made larger accordingly
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        """`input_ids' shape is expected to be [bsz x seqlen]."""

        bsz, seq_len = input_ids.shape[:2]
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
        ).expand(bsz, -1)

        return super().forward(positions + self.offset)


class TrOCRSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.weights = self.get_embedding(num_positions, embedding_dim, padding_idx)
        self.register_buffer("_float_tensor", torch.FloatTensor(1))

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        # first half of each embedding holds the sines, the second half the cosines
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad for odd embedding dimensions
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0

        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len = input_ids.size()
        # Create the position ids from the input token ids; padded tokens remain padded.
        position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
            input_ids.device
        )

        # expand the embedding table if the requested positions exceed the current size
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            self.weights = self.get_embedding(max_pos, self.embedding_dim, self.padding_idx)
        self.weights = self.weights.to(self._float_tensor)

        x = self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()

        return x

    def create_position_ids_from_input_ids(
        self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
    ):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.
        """
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx


class TrOCRAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper."""

    def __init__(
        self,
        config,
        embed_dim: int,
        num_heads: int,
        kdim: int = None,
        vdim: int = None,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_cross_attention: bool = False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse cached cross-attention key/value states
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # append new self-attention key/value states to the cache
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # reshape so that the returned attention weights keep their gradient
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value


class TrOCRDecoderLayer(nn.Module):
    def __init__(self, config: TrOCRConfig):
        super().__init__()
        self.embed_dim = config.hidden_size

        self.self_attn = TrOCRAttention(
            config,
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)

        if config.is_decoder:
            self.encoder_attn = TrOCRAttention(
                config,
                embed_dim=self.embed_dim,
                num_heads=config.decoder_attention_heads,
                kdim=config.cross_attention_hidden_size,
                vdim=config.cross_attention_hidden_size,
                dropout=config.attention_dropout,
                is_decoder=True,
                is_cross_attention=True,
            )
            self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)

        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size *(decoder_attention_heads,)*.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None

        if encoder_hidden_states is not None:
            residual = hidden_states

            # cross-attention cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None

            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


class TrOCRPreTrainedModel(PreTrainedModel):
    config_class = TrOCRConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, TrOCRDecoder):
            module.gradient_checkpointing = value


TROCR_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`TrOCRConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


class TrOCRDecoder(TrOCRPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TrOCRDecoderLayer`]

    Args:
        config: TrOCRConfig
    """

    def __init__(self, config: TrOCRConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)

        if config.use_learned_position_embeddings:
            self.embed_positions = TrOCRLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)
        else:
            self.embed_positions = TrOCRSinusoidalPositionalEmbedding(
                config.max_position_embeddings + self.padding_idx + 1,
                config.hidden_size,
                self.padding_idx,
            )

        if config.layernorm_embedding:
            self.layernorm_embedding = nn.LayerNorm(config.hidden_size)
        else:
            self.layernorm_embedding = None

        self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)])

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask: [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input = input_ids
            input_ids = input_ids.view(-1, input.shape[-1])
        elif inputs_embeds is not None:
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        if self.config.use_learned_position_embeddings:
            embed_pos = self.embed_positions(input, past_key_values_length=past_key_values_length)
        else:
            embed_pos = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)

        hidden_states = inputs_embeds + embed_pos

        if self.layernorm_embedding is not None:
            hidden_states = self.layernorm_embedding(hidden_states)

        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        input_shape = input.shape

        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@add_start_docstrings(
    "The TrOCR Model with a language modeling head. Can be used for summarization.",
    TROCR_START_DOCSTRING,
)
class TrOCRDecoderWrapper(TrOCRPreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """

    def __init__(self, config):
        super().__init__(config)
        self.decoder = TrOCRDecoder(config)

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)


@add_start_docstrings(
    "The TrOCR Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and"
    " [`VisionEncoderDecoder`].",
    TROCR_START_DOCSTRING,
)
class TrOCRForCausalLM(TrOCRPreTrainedModel):
    _tied_weights_keys = ["output_projection.weight"]

    def __init__(self, config):
        config = copy.deepcopy(config)
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        self.model = TrOCRDecoderWrapper(config)

        self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.output_projection

    def set_output_embeddings(self, new_embeddings):
        self.output_projection = new_embeddings

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder

    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:

        Example:

        ```python
        >>> from transformers import (
        ...     TrOCRConfig,
        ...     TrOCRProcessor,
        ...     TrOCRForCausalLM,
        ...     ViTConfig,
        ...     ViTModel,
        ...     VisionEncoderDecoderModel,
        ... )
        >>> import requests
        >>> from PIL import Image

        >>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel
        >>> # init vision2text model with random weights
        >>> encoder = ViTModel(ViTConfig())
        >>> decoder = TrOCRForCausalLM(TrOCRConfig())
        >>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)

        >>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel`
        >>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
        >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

        >>> # load image from the IAM dataset
        >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
        >>> pixel_values = processor(image, return_tensors="pt").pixel_values
        >>> text = "industry, ' Mr. Brown commented icily. ' Let us have a"

        >>> # training
        >>> model.config.decoder_start_token_id = processor.tokenizer.cls_token_id
        >>> model.config.pad_token_id = processor.tokenizer.pad_token_id
        >>> model.config.vocab_size = model.config.decoder.vocab_size

        >>> labels = processor.tokenizer(text, return_tensors="pt").input_ids
        >>> outputs = model(pixel_values, labels=labels)
        >>> loss = outputs.loss
        >>> round(loss.item(), 2)
        5.30

        >>> # inference
        >>> generated_ids = model.generate(pixel_values)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> generated_text
        'industry, " Mr. Brown commented icily. " Let us have a'
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.output_projection(outputs[0])

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
    ):
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)

        if past_key_values:
            input_ids = input_ids[:, -1:]
        # first step, decoder_cached_states are empty
        return {
            "input_ids": input_ids,  # encoder_outputs is defined. input_ids not needed
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past