""" Hubert model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/hubert-base-ls960": "https://huggingface.co/facebook/hubert-base-ls960/resolve/main/config.json",
}


class HubertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`HubertModel`]. It is used to instantiate a
    Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Hubert
    [facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`HubertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`HubertForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
            details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in the feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_proj_layer_norm (`bool`, *optional*, defaults to `True`):
            Whether to apply LayerNorm to the output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of 1D convolutional positional embeddings layer.
        do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
            False` corresponds to applying layer norm after the attention layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis (see
            the illustrative sketch after this argument list). If reasoning from the probability of each feature
            vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be
            `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked
            vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, irrespective of
            `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks
            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
            overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, irrespective
            of `mask_feature_prob`. Only relevant if
            `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`HubertForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`HubertForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`HubertForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
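
    For intuition on the SpecAugment masking arguments above, here is a small illustrative sketch (the sequence
    length of 1000 frames is an assumed example value, not a model default): roughly
    `mask_time_prob * sequence_length / mask_time_length` mask spans are drawn along the time axis.

    ```python
    >>> # Illustrative only: approximate number of time-axis masks for an assumed 1000-frame sequence
    >>> mask_time_prob, mask_time_length, sequence_length = 0.05, 10, 1000
    >>> int(mask_time_prob * sequence_length / mask_time_length)
    5
    ```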

    Example:

    ```python
    >>> from transformers import HubertModel, HubertConfig

    >>> # Initializing a Hubert facebook/hubert-base-ls960 style configuration
    >>> configuration = HubertConfig()

    >>> # Initializing a model from the facebook/hubert-base-ls960 style configuration
    >>> model = HubertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
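
    >>> # The feature encoder downsamples the raw waveform by the product of the convolutional strides,
    >>> # so with the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)` there is one output frame per 320 input samples
    >>> configuration.inputs_to_logits_ratio
    320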
    ```Zhubert             gelu皙?T        {Gz?h㈵>group   r   r   r   r   r   r         r   r   r   r   r   
   r   r   r   r   r   r   F      皙?r   r   r   sum      c'           (         sl  t  jf |'|$|%|&d || _|| _|| _t|| _t|| _t|| _|| _	|| _
|| _t| j| _|| _|| _|| _|| _|| _|	| _|| _|
| _|| _|| _|| _|| _|| _|| _|| _|"| _|#| _t| j| jkst| j| jkst| j| jkr2tdt| j dt| j dt| j d|| _ || _!|| _"|| _#|| _$|| _%|| _&| | _'|!| _(d S )N)pad_token_idbos_token_ideos_token_idzConfiguration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = z`, `len(config.conv_stride) = z`, `len(config.conv_kernel) = z`.))super__init__hidden_sizefeat_extract_normfeat_extract_activationlistconv_dimconv_strideconv_kernel	conv_biasnum_conv_pos_embeddingsnum_conv_pos_embedding_groupslenZnum_feat_extract_layersnum_hidden_layersintermediate_size
hidden_actnum_attention_headshidden_dropoutattention_dropoutactivation_dropoutfeat_proj_layer_normfeat_proj_dropoutfinal_dropout	layerdroplayer_norm_epsinitializer_range
vocab_sizedo_stable_layer_normuse_weighted_layer_sumclassifier_proj_size
ValueErrorapply_spec_augmentmask_time_probmask_time_lengthmask_time_min_masksmask_feature_probmask_feature_lengthmask_feature_min_masksctc_loss_reductionctc_zero_infinity)(selfr:   r"   r-   r0   r.   r/   r1   r3   r2   r4   r5   r6   r7   r9   r8   r#   r$   r&   r'   r(   r)   r*   r+   r;   r?   r@   rA   rB   rC   rD   rE   rF   rG   r<   r=   r   r   r   kwargs	__class__ n/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/transformers/models/hubert/configuration_hubert.pyr!      s\    *


(zHubertConfig.__init__c                 C   s   t tj| jdS )Nr   )	functoolsreduceoperatormulr'   )rH   rL   rL   rM   inputs_to_logits_ratio  s    z#HubertConfig.inputs_to_logits_ratio)&r   r   r   r   r	   r
   r   r   r   Tr   r   r   r   r   r   r
   r   r   r   Fr   r   FTr   r   r   r   r   r   r   FFr   r   r   r   )	__name__
__module____qualname____doc__Z
model_typer!   propertyrR   __classcell__rL   rL   rJ   rM   r       sX                                          `r   )rV   rN   rP   Zconfiguration_utilsr   utilsr   Z
get_loggerrS   loggerZ$HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAPr   rL   rL   rL   rM   <module>   s   
 