""" Perceiver model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
}


class PerceiverConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
    Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Perceiver
    [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_latents (`int`, *optional*, defaults to 256):
            The number of latents.
        d_latents (`int`, *optional*, defaults to 1280):
            Dimension of the latent embeddings.
        d_model (`int`, *optional*, defaults to 768):
            Dimension of the inputs. Should only be provided when [*PerceiverTextPreprocessor*] is used or when no
            preprocessor is provided.
        num_blocks (`int`, *optional*, defaults to 1):
            Number of blocks in the Transformer encoder.
        num_self_attends_per_block (`int`, *optional*, defaults to 26):
            The number of self-attention layers per block.
        num_self_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each self-attention layer in the Transformer encoder.
        num_cross_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each cross-attention layer in the Transformer encoder.
        qk_channels (`int`, *optional*):
            Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
            layers of the encoder. Will default to preserving the dimension of the queries if not specified.
        v_channels (`int`, *optional*):
            Dimension to project the values before applying attention in the cross-attention and self-attention layers
            of the encoder. Will default to preserving the dimension of the queries if not specified.
        cross_attention_shape_for_attention (`str`, *optional*, defaults to `'kv'`):
            Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
        self_attention_widening_factor (`int`, *optional*, defaults to 1):
            Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.
        cross_attention_widening_factor (`int`, *optional*, defaults to 1):
            Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        use_query_residual (`bool`, *optional*, defaults to `True`):
            Whether to add a query residual in the cross-attention layer of the encoder.
        vocab_size (`int`, *optional*, defaults to 262):
            Vocabulary size for the masked language modeling model.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that the masked language modeling model might ever be used with. Typically set
            this to something large just in case (e.g., 512 or 1024 or 2048).
        image_size (`int`, *optional*, defaults to 56):
            Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
        train_size (`List[int]`, *optional*, defaults to `[368, 496]`):
            Training size of the images for the optical flow model.
        num_frames (`int`, *optional*, defaults to 16):
            Number of video frames used for the multimodal autoencoding model.
        audio_samples_per_frame (`int`, *optional*, defaults to 1920):
            Number of audio samples per frame for the multimodal autoencoding model.
        samples_per_patch (`int`, *optional*, defaults to 16):
            Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
        output_num_channels (`int`, *optional*, defaults to 512):
            Number of output channels for each modality decoder.
        output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
            Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
            autoencoding model. This excludes the channel dimension.

    Example:

    ```python
    >>> from transformers import PerceiverModel, PerceiverConfig

    >>> # Initializing a Perceiver deepmind/language-perceiver style configuration
    >>> configuration = PerceiverConfig()

    >>> # Initializing a model from the deepmind/language-perceiver style configuration
    >>> model = PerceiverModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        output_num_channels=512,
        _label_trainable_num_channels=1024,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # optical flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
        self.output_num_channels = output_num_channels
        self._label_trainable_num_channels = _label_trainable_num_channels


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If the axis is dynamic (-1), forward with a fixed dimension to avoid ONNX optimizations.
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy text inputs according to the computed batch size and sequence length.
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
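
# A minimal usage sketch of the classes above, kept as comments because this module
# is meant to be imported rather than executed. It assumes the
# "deepmind/language-perceiver" tokenizer is available locally or from the Hub.
#
#     >>> from transformers import PerceiverConfig, PerceiverTokenizer
#     >>> from transformers.models.perceiver.configuration_perceiver import PerceiverOnnxConfig
#     >>> from transformers.utils import TensorType
#
#     >>> config = PerceiverConfig()
#     >>> onnx_config = PerceiverOnnxConfig(config)
#     >>> dict(onnx_config.inputs)  # dynamic axes declared for ONNX export (default task)
#     {'inputs': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}
#
#     >>> tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     >>> dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     >>> # `generate_dummy_inputs` renames "input_ids" to "inputs" to match the model signature.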