""" PyTorch ALIGN model."""

import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    BaseModelOutputWithPoolingAndNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "kakaobrain/align-base"
_CONFIG_FOR_DOC = "AlignConfig"

ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "kakaobrain/align-base",
]

ALIGN_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`AlignConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
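
    Example (a minimal usage sketch; `kakaobrain/align-base` is the checkpoint referenced elsewhere in this file, and
    both initialization paths below follow the generic `PreTrainedModel` API):

    ```python
    >>> from transformers import AlignConfig, AlignModel

    >>> # Instantiating from a configuration builds the architecture with randomly initialized weights
    >>> model = AlignModel(AlignConfig())

    >>> # Loading the pretrained weights requires `from_pretrained`
    >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
    ```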
"""

ALIGN_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
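
    Example (a minimal sketch of how these text inputs are typically built; the tokenizer checkpoint is the one used
    in the examples elsewhere in this file):

    ```python
    >>> from transformers import AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
    >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
    >>> # `inputs` now holds the padded `input_ids`, `token_type_ids` and `attention_mask` described above
    ```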
"""

ALIGN_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
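
    Example (a minimal sketch of producing `pixel_values`; the image URL and processor checkpoint are the ones used in
    the examples elsewhere in this file):

    ```python
    >>> import requests
    >>> from PIL import Image
    >>> from transformers import AutoProcessor

    >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)
    >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values  # (batch_size, num_channels, height, width)
    ```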
"""

ALIGN_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
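
    Example (a minimal sketch combining the text and image inputs described above; passing `return_loss=True` is
    assumed to populate `loss` on the returned output, as documented for that argument):

    ```python
    >>> import requests
    >>> from PIL import Image
    >>> from transformers import AutoProcessor, AlignModel

    >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
    >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

    >>> image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

    >>> outputs = model(**inputs, return_loss=True)
    >>> loss, logits_per_image = outputs.loss, outputs.logits_per_image
    ```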
"""


@dataclass
class AlignVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    Nimage_embedslast_hidden_statehidden_states)__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r   r   r    r&   r&   g/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/transformers/models/align/modeling_align.pyr      s   
r   c                   @   s^   e Zd ZU dZdZeej ed< dZ	ejed< dZ
eeej  ed< dZeeej  ed< dS )AlignTextModelOutputa  
    Base class for text model's outputs that also contains a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Ntext_embedsr   r   
attentions)r   r    r!   r"   r)   r   r#   r$   r%   r   r   r   r*   r&   r&   r&   r'   r(      s
   
r(   c                   @   s   e Zd ZU dZdZeej ed< dZ	ejed< dZ
ejed< dZejed< dZejed< dZeed< dZeed	< ee d
ddZdS )AlignOutputa  
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The output of [`AlignVisionModel`].
        text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`AlignTextModel`].
        vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
            The output of the [`AlignVisionModel`].
    Nlosslogits_per_imagelogits_per_textr)   r   text_model_outputvision_model_outputreturnc                    s   t  fdd  D S )Nc                 3   s,   | ]$}|d kr | nt  | V  qdS ))r/   r0   N)getattrto_tuple).0kselfr&   r'   	<genexpr>  s   z'AlignOutput.to_tuple.<locals>.<genexpr>)tuplekeysr7   r&   r7   r'   r4     s    zAlignOutput.to_tuple)r   r    r!   r"   r,   r   r#   r$   r%   r-   r.   r)   r   r/   r   r0   r   r   r   r4   r&   r&   r&   r'   r+      s   
r+   )logitsr2   c                 C   s"   t jj| tjt| | jdddS )Ndeviceg?)Zlabel_smoothing)r   
functionalZcross_entropyr#   arangelenr>   )r<   r&   r&   r'   contrastive_loss  s    rB   )
similarityr2   c                 C   s    t | }t |  }|| d S )Ng       @)rB   t)rC   Zcaption_lossZ
image_lossr&   r&   r'   
align_loss  s    rE   )confignum_channelsc                 C   sJ   | j }|| j9 }t|t||d  | | }|d| k rB||7 }t|S )z<
    Round number of filters based on depth multiplier.
       g?)Zdepth_divisorZwidth_coefficientmaxint)rF   rG   ZdivisorZnew_dimr&   r&   r'   round_filters"  s    
rK   T)kernel_sizeadjustc                 C   sr   t | tr| | f} | d d | d d f}|rR|d d |d |d d |d fS |d |d |d |d fS dS )aJ  
    Utility function to get the tuple padding value for the depthwise convolution.

    Args:
        kernel_size (`int` or `tuple`):
            Kernel size of the convolution layers.
        adjust (`bool`, *optional*, defaults to `True`):
            Adjusts padding value to apply to right and bottom sides of the input.
    r   rH   r   N)
isinstancerJ   )rL   rM   Zcorrectr&   r&   r'   correct_pad2  s    

$rO   c                       s:   e Zd ZdZed fddZejejdddZ  Z	S )AlignVisionEmbeddingszL
    A module that corresponds to the stem module of the original work.
    """

    def __init__(self, config: AlignVisionConfig):
        super().__init__()

        self.out_dim = round_filters(config, 32)
        self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
        self.convolution = nn.Conv2d(
            config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
        )
        self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        features = self.padding(pixel_values)
        features = self.convolution(features)
        features = self.batchnorm(features)
        features = self.activation(features)

        return features
r   r    r!   r"   r   r\   r#   Tensorrm   __classcell__r&   r&   rh   r'   rP   G  s   rP   c                       s   e Zd Zd fdd	Z  ZS )	AlignVisionDepthwiseConv2dr   r   r   Tzerosc	           
         s*   || }	t  j||	|||||||d	 d S )N)	in_channelsout_channelsrL   rW   rT   dilationgroupsrX   padding_mode)r[   r\   )
r8   rr   Zdepth_multiplierrL   rW   rT   rt   rX   rv   rs   rh   r&   r'   r\   b  s    z#AlignVisionDepthwiseConv2d.__init__)r   r   r   r   r   Trq   )r   r    r!   r\   ro   r&   r&   rh   r'   rp   a  s          rp   c                       s@   e Zd ZdZeeeed fddZejej	dddZ
  ZS )AlignVisionExpansionLayerz_
    This corresponds to the expansion phase of each block in the original implementation.
    rF   in_dimr]   rW   c                    sB   t    tj||dddd| _tj||jd| _t|j	 | _
d S )Nr   sameFrr   rs   rL   rT   rX   )num_featuresrY   )r[   r\   r   r_   expand_convra   rb   	expand_bnr	   re   
expand_act)r8   rF   ry   r]   rW   rh   r&   r'   r\     s    
z"AlignVisionExpansionLayer.__init__r   r2   c                 C   s"   |  |}| |}| |}|S rk   )r}   r~   r   r8   r   r&   r&   r'   rm     s    


z!AlignVisionExpansionLayer.forward)r   r    r!   r"   r   rJ   r\   r#   r$   rn   rm   ro   r&   r&   rh   r'   rw   |  s   rw   c                       sB   e Zd ZdZeeeeed fddZej	ej
dddZ  ZS )AlignVisionDepthwiseLayerzk
    This corresponds to the depthwise convolution phase of each block in the original implementation.
    rF   ry   rW   rL   adjust_paddingc                    sv   t    || _| jdkrdnd}t||d}tj|d| _t||||dd| _tj	||j
|jd| _t|j | _d S )	NrH   rU   rz   )rM   rS   FrV   r|   rY   rZ   )r[   r\   rW   rO   r   r^   depthwise_conv_padrp   depthwise_convra   rb   rc   depthwise_normr	   re   depthwise_act)r8   rF   ry   rW   rL   r   Zconv_padrT   rh   r&   r'   r\     s$    
      z"AlignVisionDepthwiseLayer.__init__r   c                 C   s6   | j dkr| |}| |}| |}| |}|S )NrH   )rW   r   r   r   r   r   r&   r&   r'   rm     s    




z!AlignVisionDepthwiseLayer.forwardr   r    r!   r"   r   rJ   boolr\   r#   r$   rn   rm   ro   r&   r&   rh   r'   r     s   r   c                       sB   e Zd ZdZd	eeeed fddZej	ej
dddZ  ZS )
AlignVisionSqueezeExciteLayerzl
    This corresponds to the squeeze-and-excitation phase of each block in the original implementation.
    FrF   ry   
expand_dimexpandc                    s   t    |r|n|| _tdt||j | _tjdd| _	tj
| j| jddd| _tj
| j| jddd| _t|j | _t | _d S )Nr   )Zoutput_sizerz   )rr   rs   rL   rT   )r[   r\   dimrI   rJ   Zsqueeze_expansion_ratioZdim_ser   ZAdaptiveAvgPool2dsqueezer_   reducer   r	   re   
act_reduceZSigmoid
act_expand)r8   rF   ry   r   r   rh   r&   r'   r\     s$    
z&AlignVisionSqueezeExciteLayer.__init__r   c                 C   sF   |}|  |}| |}| |}| |}| |}t||}|S rk   )r   r   r   r   r   r#   mul)r8   r   inputsr&   r&   r'   rm     s    




z%AlignVisionSqueezeExciteLayer.forward)Fr   r&   r&   rh   r'   r     s   r   c                       sH   e Zd ZdZeeeeeed fddZe	j
e	j
e	jdddZ  ZS )AlignVisionFinalBlockLayerz[
    This corresponds to the final phase of each block in the original implementation.
    rF   ry   r]   rW   	drop_rateid_skipc                    sX   t    |dko| | _tj||dddd| _tj||j|jd| _	tj
|d| _d S )Nr   rz   Fr{   r   )p)r[   r\   apply_dropoutr   r_   project_convra   rb   rc   
project_bnDropoutdropout)r8   rF   ry   r]   rW   r   r   rh   r&   r'   r\     s    
  z#AlignVisionFinalBlockLayer.__init__)
embeddingsr   r2   c                 C   s0   |  |}| |}| jr,| |}|| }|S rk   )r   r   r   r   )r8   r   r   r&   r&   r'   rm     s    


z"AlignVisionFinalBlockLayer.forwardr   r    r!   r"   r   rJ   floatr   r\   r#   r$   rn   rm   ro   r&   r&   rh   r'   r     s        r   c                
       sJ   e Zd ZdZeeeeeeeeed	 fddZe	j
e	jdddZ  ZS )AlignVisionBlocka  
    This corresponds to the block module of the original EfficientNet vision encoder implementation.

    Args:
        config ([`AlignVisionConfig`]):
            Model configuration class.
        in_dim (`int`):
            Number of input channels.
        out_dim (`int`):
            Number of output channels.
        stride (`int`):
            Stride size to be used in convolution layers.
        expand_ratio (`int`):
            Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
        kernel_size (`int`):
            Kernel size for the depthwise convolution layer.
        drop_rate (`float`):
            Dropout rate to be used in the final phase of each block.
        id_skip (`bool`):
            Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
            of each block. Set to `True` for the first block of each stage.
        adjust_padding (`bool`):
            Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
            operation, set to `True` for inputs with odd input sizes.
    )	rF   ry   r]   rW   expand_ratiorL   r   r   r   c
                    s   t    || _| jdkrdnd| _|| }
| jrDt|||
|d| _t|| jrR|
n||||	d| _t|||
| jd| _	t
|| jr|
n|||||d| _d S )Nr   TFrx   r   r   r   )r[   r\   r   r   rw   	expansionr   r   r   squeeze_exciter   
projection)r8   rF   ry   r]   rW   r   rL   r   r   r   Zexpand_in_dimrh   r&   r'   r\   "  s@    
      zAlignVisionBlock.__init__r   c                 C   s<   |}| j dkr| |}| |}| |}| ||}|S )Nr   )r   r   r   r   r   )r8   r   r   r&   r&   r'   rm   K  s    



zAlignVisionBlock.forwardr   r&   r&   rh   r'   r     s   )r   c                       sF   e Zd ZdZed fddZd
ejee	 ee	 e
ddd	Z  ZS )AlignVisionEncoderz
    Forward propagates the embeddings through each vision encoder (EfficientNet) block.

    Args:
        config ([`AlignVisionConfig`]):
            Model configuration class.
    rQ   c                    s8  t    |j_fdd t|j}t fdd|jD }d}g }t|D ]}t||j| }t||j	| }|j
| }	|j| }
|j| }t |j| D ]}|dkrdnd}|dkrdn|	}	|dkr|n|}||jkrdnd}|j| | }t||||	|
||||d		}|| |d7 }qqPt|_d S )
Nc                    s   t t j|  S rk   )rJ   mathceildepth_coefficient)Zrepeatsr7   r&   r'   round_repeatse  s    z2AlignVisionEncoder.__init__.<locals>.round_repeatsc                 3   s   | ]} |V  qd S rk   r&   )r5   n)r   r&   r'   r9   j  s     z.AlignVisionEncoder.__init__.<locals>.<genexpr>r   TFr   )	rF   ry   r]   rW   rL   r   r   r   r   )r[   r\   r   rA   rr   sumZnum_block_repeatsrangerK   rs   stridesZkernel_sizesZexpand_ratiosZdepthwise_paddingZdrop_connect_rater   appendr   
ModuleListblocks)r8   rF   Znum_base_blocksZ
num_blocksZcurr_block_numr   iry   r]   rW   rL   r   jr   r   r   blockrh   )r   r8   r'   r\   a  sB    





zAlignVisionEncoder.__init__FT)r   output_hidden_statesreturn_dictr2   c                 C   sV   |r
|fnd }| j D ]}||}|r||f7 }q|sJtdd ||fD S t||dS )Nc                 s   s   | ]}|d k	r|V  qd S rk   r&   r5   vr&   r&   r'   r9     s      z-AlignVisionEncoder.forward.<locals>.<genexpr>)r   r   )r   r:   r
   )r8   r   r   r   all_hidden_statesr   r&   r&   r'   rm     s    
zAlignVisionEncoder.forward)FT)r   r    r!   r"   r   r\   r#   r$   r   r   r   rm   ro   r&   r&   rh   r'   r   X  s   .  r   c                       sT   e Zd ZdZ fddZd	eej eej eej eej e	ej
dddZ  ZS )
AlignTextEmbeddingszGConstruct the embeddings from word, position and token_type embeddings.c                    s   t    tj|j|j|jd| _t|j|j| _	t|j
|j| _tj|j|jd| _t|j| _t|dd| _| jdt|jddd | jd	tj| j tjd
dd d S )N)padding_idxrY   position_embedding_typeabsoluteposition_ids)r   F)
persistenttoken_type_idsdtype)r[   r\   r   	EmbeddingZ
vocab_sizehidden_sizeZpad_token_idword_embeddingsmax_position_embeddingsposition_embeddingsZtype_vocab_sizetoken_type_embeddings	LayerNormlayer_norm_epsr   hidden_dropout_probr   r3   r   Zregister_bufferr#   r@   r   rq   r   sizelongrg   rh   r&   r'   r\     s"    
    zAlignTextEmbeddings.__init__Nr   )	input_idsr   r   inputs_embedspast_key_values_lengthr2   c                 C   s   |d k	r|  }n|  d d }|d }|d krL| jd d ||| f }|d krt| dr| jd d d |f }||d |}	|	}ntj|tj| jjd}|d kr| 	|}| 
|}
||
 }| jdkr| |}||7 }| |}| |}|S )Nr   r   r   r   r   r>   r   )r   r   hasattrr   r   r#   rq   r   r>   r   r   r   r   r   r   )r8   r   r   r   r   r   input_shape
seq_lengthbuffered_token_type_ids buffered_token_type_ids_expandedr   r   r   r&   r&   r'   rm     s,    







zAlignTextEmbeddings.forward)NNNNr   )r   r    r!   r"   r\   r   r#   
LongTensorr$   rJ   rn   rm   ro   r&   r&   rh   r'   r     s        r   c                
       s   e Zd Zd fdd	ZejejdddZdejeej eej eej eej ee	e	ej   ee
 e	ej dd	d
Z  ZS )AlignTextSelfAttentionNc                    s   t    |j|j dkr>t|ds>td|j d|j d|j| _t|j|j | _| j| j | _t	
|j| j| _t	
|j| j| _t	
|j| j| _t	|j| _|pt|dd| _| jdks| jd	kr|j| _t	d
|j d | j| _|j| _d S )Nr   Zembedding_sizezThe hidden size (z6) is not a multiple of the number of attention heads ()r   r   relative_keyrelative_key_queryrH   r   )r[   r\   r   num_attention_headsr   
ValueErrorrJ   attention_head_sizeall_head_sizer   Linearquerykeyvaluer   Zattention_probs_dropout_probr   r3   r   r   r   distance_embedding
is_decoderr8   rF   r   rh   r&   r'   r\     s*    
  zAlignTextSelfAttention.__init__)xr2   c                 C   s6   |  d d | j| jf }||}|ddddS )Nr   r   rH   r   r   )r   r   r   viewpermute)r8   r   Znew_x_shaper&   r&   r'   transpose_for_scores  s    
z+AlignTextSelfAttention.transpose_for_scoresFr   attention_mask	head_maskencoder_hidden_statesencoder_attention_maskpast_key_valueoutput_attentionsr2   c                 C   s  |  |}|d k	}	|	r4|d k	r4|d }
|d }|}n|	r^| | |}
| | |}|}nv|d k	r| | |}
| | |}tj|d |
gdd}
tj|d |gdd}n | | |}
| | |}| |}|d k	}| jr|
|f}t||
dd}| j	dks | j	dkr|j
d |
j
d  }}|r^tj|d tj|jd	dd}ntj|tj|jd	dd}tj|tj|jd	dd}|| }| || j d }|j|jd
}| j	dkrtd||}|| }n4| j	dkrtd||}td|
|}|| | }|t| j }|d k	r:|| }tjj|dd}| |}|d k	rf|| }t||}|dddd }| d d | jf }||}|r||fn|f}| jr||f }|S )Nr   r   rH   r   r   r   r   r   r   zbhld,lrd->bhlrzbhrd,lrd->bhlrr   ) r   r   r   r   r#   catr   matmulZ	transposer   shapetensorr   r>   r   r@   r   r   tor   Zeinsumr   sqrtr   r   r?   Zsoftmaxr   r   
contiguousr   r   )r8   r   r   r   r   r   r   r   Zmixed_query_layerZis_cross_attentionZ	key_layerZvalue_layerZquery_layer	use_cacheZattention_scoresZquery_lengthZ
key_lengthZposition_ids_lZposition_ids_rZdistanceZpositional_embeddingZrelative_position_scoresZrelative_position_scores_queryZrelative_position_scores_keyZattention_probsZcontext_layerZnew_context_layer_shapeoutputsr&   r&   r'   rm     sp    


 





zAlignTextSelfAttention.forward)N)NNNNNF)r   r    r!   r\   r#   rn   r   r   r$   r   r   rm   ro   r&   r&   rh   r'   r     s$         r   c                       s4   e Zd Z fddZejejejdddZ  ZS )AlignTextSelfOutputc                    sB   t    t|j|j| _tj|j|jd| _t|j	| _
d S Nr   )r[   r\   r   r   r   denser   r   r   r   r   rg   rh   r&   r'   r\   l  s    
zAlignTextSelfOutput.__init__r   input_tensorr2   c                 C   s&   |  |}| |}| || }|S rk   r  r   r   r8   r   r  r&   r&   r'   rm   r  s    

zAlignTextSelfOutput.forwardr   r    r!   r\   r#   rn   rm   ro   r&   r&   rh   r'   r  k  s   r  c                
       sv   e Zd Zd
 fdd	Zdd Zdejeej eej eej eej ee	e	ej   ee
 e	ej ddd	Z  ZS )AlignTextAttentionNc                    s.   t    t||d| _t|| _t | _d S )Nr   )r[   r\   r   r8   r  outputsetpruned_headsr   rh   r&   r'   r\   {  s    

zAlignTextAttention.__init__c                 C   s   t |dkrd S t|| jj| jj| j\}}t| jj|| j_t| jj|| j_t| jj	|| j_	t| j
j|dd| j
_| jjt | | j_| jj| jj | j_| j|| _d S )Nr   r   r   )rA   r   r8   r   r   r  r   r   r   r   r  r  r   union)r8   Zheadsindexr&   r&   r'   prune_heads  s       zAlignTextAttention.prune_headsFr   c              	   C   s<   |  |||||||}| |d |}	|	f|dd   }
|
S )Nr   r   )r8   r  )r8   r   r   r   r   r   r   r   Zself_outputsattention_outputr  r&   r&   r'   rm     s    
	zAlignTextAttention.forward)N)NNNNNF)r   r    r!   r\   r  r#   rn   r   r$   r   r   rm   ro   r&   r&   rh   r'   r
  z  s$         r
  c                       s0   e Zd Z fddZejejdddZ  ZS )AlignTextIntermediatec                    sB   t    t|j|j| _t|jt	r6t
|j | _n|j| _d S rk   )r[   r\   r   r   r   intermediate_sizer  rN   re   strr	   intermediate_act_fnrg   rh   r&   r'   r\     s
    
zAlignTextIntermediate.__init__r   c                 C   s   |  |}| |}|S rk   )r  r  r   r&   r&   r'   rm     s    

zAlignTextIntermediate.forwardr	  r&   r&   rh   r'   r    s   r  c                       s4   e Zd Z fddZejejejdddZ  ZS )AlignTextOutputc                    sB   t    t|j|j| _tj|j|jd| _t	|j
| _d S r  )r[   r\   r   r   r  r   r  r   r   r   r   r   rg   rh   r&   r'   r\     s    
zAlignTextOutput.__init__r  c                 C   s&   |  |}| |}| || }|S rk   r  r  r&   r&   r'   rm     s    

zAlignTextOutput.forwardr	  r&   r&   rh   r'   r    s   r  c                
       st   e Zd Z fddZd
ejeej eej eej eej eeeej   ee	 eej dddZ
dd	 Z  ZS )AlignTextLayerc                    sr   t    |j| _d| _t|| _|j| _|j| _| jrZ| jsLt|  dt|dd| _	t
|| _t|| _d S )Nr   z> should be used as a decoder model if cross attention is addedr   r  )r[   r\   chunk_size_feed_forwardseq_len_dimr
  	attentionr   add_cross_attentionr   crossattentionr  intermediater  r  rg   rh   r&   r'   r\     s    


zAlignTextLayer.__init__NFr   c              	   C   s  |d k	r|d d nd }| j |||||d}	|	d }
| jrP|	dd }|	d }n|	dd  }d }| jr|d k	rt| dstd|  d|d k	r|d	d  nd }| |
||||||}|d }
||dd  }|d }|| }t| j| j| j|
}|f| }| jr||f }|S )
NrH   r   r   r   r   r   r  z'If `encoder_hidden_states` are passed, z` has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`r   )	r  r   r   r   r  r   feed_forward_chunkr  r  )r8   r   r   r   r   r   r   r   Zself_attn_past_key_valueZself_attention_outputsr  r  Zpresent_key_valueZcross_attn_present_key_valueZcross_attn_past_key_valueZcross_attention_outputslayer_outputr&   r&   r'   rm     sV    


	   

zAlignTextLayer.forwardc                 C   s   |  |}| ||}|S rk   )r  r  )r8   r  Zintermediate_outputr!  r&   r&   r'   r     s    
z!AlignTextLayer.feed_forward_chunk)NNNNNF)r   r    r!   r\   r#   rn   r   r$   r   r   rm   r   ro   r&   r&   rh   r'   r    s$         Ar  c                       s   e Zd Z fddZd	ejeej eej eej eej eeeej   ee	 ee	 ee	 ee	 e
eej ef dddZ  ZS )
AlignTextEncoderc                    s:   t     | _t fddt jD | _d| _d S )Nc                    s   g | ]}t  qS r&   )r  )r5   _rQ   r&   r'   
<listcomp>&  s     z-AlignTextEncoder.__init__.<locals>.<listcomp>F)	r[   r\   rF   r   r   r   num_hidden_layerslayergradient_checkpointingrg   rh   rQ   r'   r\   #  s    
 zAlignTextEncoder.__init__NFT)r   r   r   r   r   past_key_valuesr   r   r   r   r2   c              	      st  |	rdnd } rdnd } r(| j jr(dnd }| jrJ| jrJ|rJtd d}|rRdnd }t| jD ]\}}|	rv||f }|d k	r|| nd }|d k	r|| nd | jr| jrև fdd}tj	j

|||||||}n|||||| }|d }|r||d f7 } r`||d f }| j jr`||d	 f }q`|	r@||f }|
sbtd
d |||||fD S t|||||dS )Nr&   zZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fc                    s    fdd}|S )Nc                     s    | f S rk   r&   )r   )moduler   r   r&   r'   custom_forwardL  s    zOAlignTextEncoder.forward.<locals>.create_custom_forward.<locals>.custom_forwardr&   )r)  r*  r  )r)  r'   create_custom_forwardK  s    z7AlignTextEncoder.forward.<locals>.create_custom_forwardr   r   r   rH   c                 s   s   | ]}|d k	r|V  qd S rk   r&   r   r&   r&   r'   r9   p  s   z+AlignTextEncoder.forward.<locals>.<genexpr>)r   r(  r   r*   cross_attentions)rF   r  r'  ZtrainingloggerZwarning_once	enumerater&  r#   utils
checkpointr:   r   )r8   r   r   r   r   r   r(  r   r   r   r   r   Zall_self_attentionsZall_cross_attentionsZnext_decoder_cacher   Zlayer_moduleZlayer_head_maskr+  Zlayer_outputsr&   r  r'   rm   )  sv    
	

zAlignTextEncoder.forward)	NNNNNNFFT)r   r    r!   r\   r#   rn   r   r$   r   r   r   r   rm   ro   r&   r&   rh   r'   r"  "  s.   	         r"  c                       s0   e Zd Z fddZejejdddZ  ZS )AlignTextPoolerc                    s*   t    t|j|j| _t | _d S rk   )r[   r\   r   r   r   r  ZTanhrf   rg   rh   r&   r'   r\     s    
zAlignTextPooler.__init__r   c                 C   s(   |d d df }|  |}| |}|S )Nr   )r  rf   )r8   r   Zfirst_token_tensorpooled_outputr&   r&   r'   rm     s    

zAlignTextPooler.forwardr	  r&   r&   rh   r'   r1    s   r1  c                   @   s.   e Zd ZdZeZdZdZdd Zd
ddZ	d	S )AlignPreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    alignTc                 C   s   t |tjtjfr@|jjjd| jjd |j	dk	r|j	j
  npt |trrtj|jj |jj	j
  d|j_n>t |tjr|jjjd| jjd |jdk	r|jj|j 
  t |tjr|j	j
  |jjd dS )zInitialize the weightsg        )meanZstdNTg      ?)rN   r   r   r_   weightdataZnormal_rF   Zinitializer_rangerX   Zzero_
AlignModelinitZxavier_uniform_text_projectionZ_is_hf_initializedr   r   r   Zfill_)r8   r)  r&   r&   r'   _init_weights  s    



z"AlignPreTrainedModel._init_weightsFc                 C   s   t |ttfr||_d S rk   )rN   AlignTextModelAlignVisionModelr'  )r8   r)  r   r&   r&   r'   _set_gradient_checkpointing  s    z0AlignPreTrainedModel._set_gradient_checkpointingN)F)
r   r    r!   r"   r   config_classZbase_model_prefixZsupports_gradient_checkpointingr;  r>  r&   r&   r&   r'   r3    s   r3  z@The text model from ALIGN without any head or projection on top.c                       s   e Zd ZeZdeed fddZdd Zdd Ze	e
eeed	deej eej eej eej eej eej ee ee ee eeef d
ddZ  ZS )r<  T)rF   add_pooling_layerc                    sD   t  | || _t|| _t|| _|r2t|nd | _| 	  d S rk   )
r[   r\   rF   r   r   r"  encoderr1  pooler	post_init)r8   rF   r@  rh   r&   r'   r\     s    

zAlignTextModel.__init__c                 C   s   | j jS rk   r   r   r7   r&   r&   r'   get_input_embeddings  s    z#AlignTextModel.get_input_embeddingsc                 C   s   || j _d S rk   rD  )r8   r   r&   r&   r'   set_input_embeddings  s    z#AlignTextModel.set_input_embeddingsoutput_typer?  N
r   r   r   r   r   r   r   r   r   r2   c
                 C   s  |dk	r|n| j j}|dk	r |n| j j}|	dk	r4|	n| j j}	|dk	rV|dk	rVtdn@|dk	rt| || | }
n"|dk	r| dd }
ntd|
\}}|dk	r|jn|j}|dkrtj	||f|d}|dkr t
| jdr| jjddd|f }|||}|}ntj|
tj|d}| ||
}| || j j}| j||||d}| j||||||	d	}|d
 }| jdk	r| |nd}|	s||f|dd  S t|||j|j|jdS )a?  
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AlignTextModel

        >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```NzDYou cannot specify both input_ids and inputs_embeds at the same timer   z5You have to specify either input_ids or inputs_embedsr=   r   r   )r   r   r   r   )r   r   r   r   r   r   r   )r   pooler_outputr   r*   r,  )rF   r   r   use_return_dictr   Z%warn_if_padding_and_no_attention_maskr   r>   r#   Zonesr   r   r   r   rq   r   Zget_extended_attention_maskZget_head_maskr%  rA  rB  r   r   r*   r,  )r8   r   r   r   r   r   r   r   r   r   r   Z
batch_sizer   r>   r   r   Zextended_attention_maskembedding_outputencoder_outputsZsequence_outputr2  r&   r&   r'   rm     sb    


zAlignTextModel.forward)T)	NNNNNNNNN)r   r    r!   r   r?  r   r\   rE  rF  r   ALIGN_TEXT_INPUTS_DOCSTRINGr   r   r   r#   rn   r   r   rm   ro   r&   r&   rh   r'   r<    s6   
         
r<  zBThe vision model from ALIGN without any head or projection on top.c                	       sz   e Zd ZeZdZed fddZejdddZ	e
eeeeddeej ee ee eeef d
ddZ  ZS )r=  rj   rQ   c                    s~   t  | || _t|| _t|| _|jdkrDtj	|j
dd| _n.|jdkrbtj|j
dd| _ntd|j |   d S )Nr5  T)Z	ceil_moderI   z2config.pooling must be one of ['mean', 'max'] got )r[   r\   rF   rP   r   r   rA  Zpooling_typer   Z	AvgPool2dZ
hidden_dimrB  Z	MaxPool2dr   ZpoolingrC  rg   rh   r&   r'   r\   ;  s    



zAlignVisionModel.__init__r1   c                 C   s
   | j jjS rk   )vision_modelr   r`   r7   r&   r&   r'   rE  L  s    z%AlignVisionModel.get_input_embeddingsrG  Nrj   r   r   r2   c                 C   s   |dk	r|n| j j}|dk	r |n| j j}|dkr8td| |}| j|||d}|d }| |}||jdd }|s||f|dd  S t	|||j
dS )a  
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignVisionModel

        >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```Nz You have to specify pixel_values)r   r   r   rH   r   )r   rJ  r   )rF   r   rK  r   r   rA  rB  Zreshaper   r   r   )r8   rj   r   r   rL  rM  r   r2  r&   r&   r'   rm   O  s*    

zAlignVisionModel.forward)NNN)r   r    r!   r   r?  Zmain_input_namer\   r   ModulerE  r   ALIGN_VISION_INPUTS_DOCSTRINGr   r   r   r#   r$   r   r   r   rm   ro   r&   r&   rh   r'   r=  3  s   
   
r=  c                       s,  e Zd ZeZed fddZeedee	j
 ee	j
 ee	j
 ee	j
 ee	j
 ee	j
 ee ee ee e	jd
ddZeedee	j ee ee e	jdd	d
Zeeeeeddee	j ee	j ee	j
 ee	j
 ee	j
 ee	j
 ee	j
 ee ee ee ee eeef dddZ  ZS )r8  rQ   c                    s   t  | t|jts.tdt|j dt|jtsPtdt|j d|j}|j}|j	| _	|j
| _t|| _t|| _t| j| j	| _tt| jj| _|   d S )NzLconfig.text_config is expected to be of type AlignTextConfig but is of type .zPconfig.vision_config is expected to be of type AlignVisionConfig but is of type )r[   r\   rN   text_configr   r   typevision_configr   Zprojection_dimr   Ztext_embed_dimr<  
text_modelr=  rO  r   r   r:  	Parameterr#   r   rF   Ztemperature_init_valuetemperaturerC  )r8   rF   rT  rV  rh   r&   r'   r\     s$    

zAlignModel.__init__NrI  c
                 C   s   |dk	r|n| j j}|dk	r |n| j j}|	dk	r4|	n| j j}	| j|||||||||	d	}
|
d dddddf }| |}|S )a  
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`AlignTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AlignModel

        >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```N	r   r   r   r   r   r   r   r   r   r   )rF   r   r   rK  rW  r:  )r8   r   r   r   r   r   r   r   r   r   text_outputsr   Ztext_featuresr&   r&   r'   get_text_features  s$    
zAlignModel.get_text_featuresrP  c                 C   sD   |dk	r|n| j j}|dk	r |n| j j}| j|||d}|d }|S )a9  
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`AlignVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignModel

        >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```Nrj   r   r   r   )rF   r   rK  rO  )r8   rj   r   r   vision_outputsZimage_featuresr&   r&   r'   get_image_features  s    zAlignModel.get_image_featuresrG  )r   rj   r   r   r   r   r   return_lossr   r   r   r2   c                 C   s.  |	dk	r|	n| j j}	|
dk	r |
n| j j}
|dk	r4|n| j j}| j||
|d}| j|||||||	|
|d	}|d }|d dddddf }| |}||jdddd	 }||jdddd	 }t	||
 | j }|
 }d}|rt|}|s||||||f}|dk	r|f| S |S t|||||||d
S )a}  
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignModel

        >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```Nr]  rZ  r   r   rH   r   T)r   r   Zkeepdim)r,   r-   r.   r)   r   r/   r0   )rF   r   r   rK  rO  rW  r:  Znormr#   r   rD   rY  rE   r+   )r8   r   rj   r   r   r   r   r   r`  r   r   r   r^  r[  r   r)   r.   r-   r,   r  r&   r&   r'   rm     sT    )
zAlignModel.forward)	NNNNNNNNN)NNN)NNNNNNNNNNN)r   r    r!   r   r?  r\   r   rN  r   r#   rn   r   r$   r\  rR  r_  ALIGN_INPUTS_DOCSTRINGr   r+   r   r   r   rm   ro   r&   r&   rh   r'   r8    sz            4   ,
           
r8  )T)Nr"   r   dataclassesr   typingr   r   r   r   r#   Ztorch.utils.checkpointr   Zactivationsr	   Zmodeling_outputsr
   r   r   r   Zmodeling_utilsr   Zpytorch_utilsr   r   r   r/  r   r   r   r   r   Zconfiguration_alignr   r   r   Z
get_loggerr   r-  Z_CHECKPOINT_FOR_DOCZ_CONFIG_FOR_DOCZ#ALIGN_PRETRAINED_MODEL_ARCHIVE_LISTZALIGN_START_DOCSTRINGrN  rR  ra  r   r(   r+   rn   rB   rE   rJ   rK   r   rO   rQ  rP   r_   rp   rw   r   r   r   r   r   r   r   r  r
  r  r  r  r"  r1  r3  r<  r=  r8  r&   r&   r&   r'   <module>   s|   
28&('!QKA 2Wc!zT