""" PyTorch ViViT model."""


import math
from typing import Optional, Set, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "google/vivit-b-16x2-kinetics400"
_CONFIG_FOR_DOC = "VivitConfig"

VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/vivit-b-16x2-kinetics400",
]


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
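
    For example, with a 32-frame clip of 224x224 images and a tubelet size of [2, 16, 16] (the setup implied by the
    "16x2" checkpoint name and the usage examples below), seq_len = (32 // 2) * (224 // 16) * (224 // 16) = 3136,
    which together with the prepended [CLS] token gives the sequence length of 3137 seen in those examples.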
    """

    def __init__(self, config):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if height != self.image_size or width != self.image_size:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        # (batch_size, hidden_size, t, h, w) -> (batch_size, seq_len, hidden_size)
        x = self.projection(pixel_values)
        x = x.flatten(2).transpose(1, 2)
        return x


class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
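
    The resulting sequence has shape (batch_size, num_patches + 1, hidden_size); the extra token is the prepended
    [CLS] token (3137 tokens in total for the google/vivit-b-16x2-kinetics400 usage examples further below).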
    """

    def __init__(self, config):
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = VivitTubeletEmbeddings(config)

        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, pixel_values):
        batch_size = pixel_values.shape[0]
        embeddings = self.patch_embeddings(pixel_values)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encodings to every token, including the [CLS] token
        embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # raw attention scores: dot product between "query" and "key", scaled by sqrt(head size)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # normalize the attention scores to probabilities
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # this drops out entire tokens to attend to, following the original Transformer paper
        attention_probs = self.dropout(attention_probs)

        # mask heads if requested
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # prune the linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # update hyper-parameters and store the pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class VivitIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class VivitLayer(nn.Module):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VivitAttention(config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, head_mask=None, output_attentions=False):
        self_attention_outputs = self.attention(
            # in ViViT, layernorm is applied before self-attention
            self.layernorm_before(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        # add self attentions if we output attention weights
        outputs = self_attention_outputs[1:]

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in ViViT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs


class VivitEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    layer_head_mask,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class VivitPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "pool" the model by simply taking the hidden state corresponding to the first ([CLS]) token
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class VivitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VivitConfig
    base_model_prefix = "vivit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Parameter):
            module.data.normal_(mean=0.0, std=self.config.initializer_range)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, VivitEncoder):
            module.gradient_checkpointing = value


VIVIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`VivitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

VIVIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`VivitImageProcessor`]. See
            [`VivitImageProcessor.preprocess`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ViViT Transformer model outputting raw hidden-states without any specific head on top.",
    VIVIT_START_DOCSTRING,
)
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # prepare head mask if needed: 1.0 keeps the head, 0.0 masks it
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
[CLS] token) e.g. for Kinetics-400.""",
    VIVIT_START_DOCSTRING,
)
class VivitForVideoClassification(VivitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vivit = VivitModel(config, add_pooling_layer=False)

        # classifier head on top of the final hidden state of the [CLS] token
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # classify based on the final hidden state of the [CLS] token
        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # regression (Mean-Square loss)
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                # classification (Cross-Entropy loss)
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )