""" MEGA configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "mnaylor/mega-base-wikitext": "https://huggingface.co/mnaylor/mega-base-wikitext/resolve/main/config.json",
}


class MegaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Mega
    [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`MegaModel`].
        hidden_size (`int`, *optional*, defaults to 128):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Mega encoder.
        intermediate_size (`int`, *optional*, defaults to 256):
            Dimensionality of the self-attention value projection within the Mega encoder
        ema_projection_size (`int`, *optional*, defaults to 16):
            Dimensionality of the MegaMultiDimensionDampedEma
        bidirectional (`bool`, *optional*, defaults to `True`):
            Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`)
            or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be
            False if you intend to use the model as a decoder.
        shared_representation_size (`int`, *optional*, defaults to 64):
            Dimensionality of the linear projection for shared representation of self-attention queries and keys
        use_chunking (`bool`, *optional*, defaults to `False`):
            Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)
        chunk_size (`int`, *optional*, defaults to -1):
            If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If
            chunking is used, input sequences must be padded to a multiple of `chunk_size`
        truncation (`int`, *optional*):
            If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma
        normalize_before_mega (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks
        normalization_type (`str`, *optional*, defaults to `"scalenorm"`):
            Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`,
            `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm)
        norm_affine (`bool`, *optional*, defaults to `True`):
            If `True`, applies a parameterized affine transformation to inputs during normalization
        activation (`str`, *optional*, defaults to `"silu"`):
            Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`,
            `"gelu"`, or `"gelu_accurate"`
        attention_activation (`str`, *optional*, defaults to `"softmax"`):
            Activation function to apply for single-headed self-attention (a la Transformer). Choose one of
            `"softmax"`, `"laplace"`, or `"relu2"`
        dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for EMA self-attention
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        use_feature_dropout (`bool`, *optional*, defaults to `False`):
            Whether to use feature-based (`True`) or standard dropout (`False`)
        use_normalized_ffn (`bool`, *optional*, defaults to `True`):
            Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output
            as-is (`False`)
        nffn_hidden_size (`int`, *optional*, defaults to 256):
            If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this
            is the hidden size of the NFFN
        normalize_before_ffn (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN
        nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the NFFN component.
        max_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length to use for positional representations. For `"simple"` relative positional bias,
            this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to longer
            sequences
        add_token_type_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to account for token types in embeddings. Left as optional to maintain compatibility with original
            implementation while adding support for token types.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if
            `add_token_type_embeddings = True`
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):
            The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in
            MegaMultiDimensionDampedEma.
        ema_beta_range (`float`, *optional*, defaults to 0.02):
            The standard deviation for initializing the beta parameter (expansion matrix) in
            MegaMultiDimensionDampedEma.
        ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):
            The standard deviation for initializing the gamma (projection matrix) and omega (residual weight)
            parameters in MegaMultiDimensionDampedEma.
        relative_positional_bias (`str`, *optional*, defaults to `"rotary"`):
            Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected,
            `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):
            Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass
            hidden states directly to LM head (`False`). Remains optional for compatibility with original
            implementation

    Examples:

    ```python
    >>> from transformers import MegaConfig, MegaModel

    >>> # Initializing a Mega configuration
    >>> configuration = MegaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MegaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
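
    >>> # Two further variants (a sketch; parameter names as documented above): a causal,
    >>> # decoder-style configuration turns off bidirectional EMA, while a chunked
    >>> # configuration expects inputs padded to a multiple of `chunk_size`
    >>> decoder_configuration = MegaConfig(bidirectional=False, is_decoder=True, use_cache=True)
    >>> chunked_configuration = MegaConfig(use_chunking=True, chunk_size=64)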
    ```Úmegaé:w  é€   é   é   é   Té@   FéÿÿÿÿNÚ	scalenormÚsiluÚsoftmaxçš™™™™™¹?é   é   ç{®Gáz”?çš™™™™™É?ç      ð?é   r   Úrotaryc&           '         sð   t ƒ jf || |!dœ|&—Ž || _|| _|| _|| _|| _|| _|| _|| _	|| _
|| _|	| _|
| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|"| _ |$| _!|#| _"|%| _#d| _$d S )N)Úpad_token_idÚbos_token_idÚeos_token_idr   )%ÚsuperÚ__init__Ú
vocab_sizeÚhidden_sizeÚnum_hidden_layersÚ
activationÚattention_activationÚintermediate_sizeÚema_projection_sizeÚbidirectionalÚshared_representation_sizeÚuse_chunkingÚ
chunk_sizeÚ
truncationÚnormalize_before_megaÚnormalization_typeÚnorm_affineÚdropout_probÚhidden_dropout_probÚattention_probs_dropout_probÚuse_feature_dropoutÚuse_normalized_ffnÚnffn_hidden_sizeÚnormalize_before_ffnÚnffn_activation_dropout_probÚmax_positionsÚadd_token_type_embeddingsÚtype_vocab_sizeÚinitializer_rangeÚema_delta_alpha_rangeÚema_beta_rangeÚema_gamma_omega_rangeÚrelative_positional_biasÚ	use_cacheÚclassifier_dropoutÚadd_lm_hidden_dense_layerZnum_attention_heads)'Úselfr!   r"   r#   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r$   r%   r0   r1   r2   r3   r4   r5   r6   r7   r8   r9   r:   r;   r<   r=   r>   r   r   r   r?   rA   r@   rB   Úkwargs©Ú	__class__© úl/var/www/html/Darija-Ai-Train/env/lib/python3.8/site-packages/transformers/models/mega/configuration_mega.pyr    –   sH    )zMegaConfig.__init__)%r
   r   r   r   r   Tr   Fr   NTr   Tr   r   r   r   r   FTr   Tr   r   Fr   r   r   r   r   r   r   r   r   NTT)Ú__name__Ú
__module__Ú__qualname__Ú__doc__Z


class MegaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
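
# A minimal usage sketch (illustrative only, not part of the original module): with the
# default task, the ONNX config declares dynamic batch and sequence axes for both inputs.
#
#     config = MegaConfig()
#     onnx_config = MegaOnnxConfig(config)
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])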