""" Whisper model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# Token ids of non-speech tokens suppressed by the `generate` logit processor:
# one list for the English-only checkpoints, one for the multilingual ones.
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93,
    357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391,
    1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203,
    9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410,
    16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283,
    40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361,
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93,
    359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246,
    3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929,
    11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956,
    20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863,
    47425, 49870, 50254, 50258, 50360, 50361, 50362,
]


class WhisperConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`WhisperModel`]. It is used to instantiate a
    Whisper model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Whisper
    [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 51865):
            Vocabulary size of the Whisper model. Defines the number of different tokens that can be represented by the
            `decoder_input_ids` passed when calling [`WhisperModel`].
        num_mel_bins (`int`, *optional*, defaults to 80):
            Number of mel features used per input feature. Should correspond to the value used in the
            `WhisperProcessor` class.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_start_token_id (`int`, *optional*, defaults to 50257):
            Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
            are provided to the `generate` function. It is used to guide the model's generation process depending on
            the task.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        d_model (`int`, *optional*, defaults to 256):
            Dimensionality of the layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to False):
            Scale embeddings by dividing by sqrt(d_model).
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        max_target_positions (`int`, *optional*, defaults to 448):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        pad_token_id (`int`, *optional*, defaults to 50256):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 50256):
            Begin of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50256):
            End of stream token id.
        suppress_tokens (`List[int]`, *optional*):
            A list containing the non-speech tokens that will be used by the logit processor in the `generate`
            function. `NON_SPEECH_TOKENS` and `NON_SPEECH_TOKENS_MULTI` correspond to the `english-only` and the
            `multilingual` models, respectively.
        begin_suppress_tokens (`List[int]`, *optional*, defaults to `[220,50256]`):
            A list containing tokens that will be suppressed at the beginning of the sampling process. Initialized as
            the token for `" "` (`blank_token_id`) and the `eos_token_id`.
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`WhisperForAudioClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification. Only relevant when using an
            instance of [`WhisperForAudioClassification`].
        apply_spec_augment (`bool`, *optional*, defaults to `False`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
            reasoning from the probability of each feature vector being chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
            actual percentage of masked vectors. This is only relevant if `apply_spec_augment == True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespectively of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
            mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
            the axis. If reasoning from the probability of each feature vector being chosen as the start of the vector
            span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
            may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment` is
            True.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
        median_filter_width (`int`, *optional*, defaults to 7):
            Width of the median filter used to smooth the cross-attention outputs when computing token timestamps.
            Should be an odd number.

    Example:

    ```python
    >>> from transformers import WhisperConfig, WhisperModel

    >>> # Initializing a Whisper tiny style configuration
    >>> configuration = WhisperConfig()

    >>> # Initializing a model (with random weights) from the tiny style configuration
    >>> model = WhisperModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
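    >>> # Illustrative only: any of the arguments documented above can be overridden when
    >>> # building a configuration; the values below are hypothetical, not tied to a released checkpoint
    >>> custom_configuration = WhisperConfig(d_model=384, encoder_layers=4, apply_spec_augment=True)
    >>> # With the default mask_time_prob=0.05 and mask_time_length=10, SpecAugment draws about
    >>> # 0.05 * 1500 / 10 = 7.5, i.e. roughly 7 independent time masks over a 1500-frame input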
    ```"""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor is sqrt(d_model) if set to True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Parameters used only by WhisperForAudioClassification
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # SpecAugment parameters (https://arxiv.org/abs/1904.08779), only used when apply_spec_augment is True
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        # Encoder dummy inputs are built from the feature extractor on synthetic audio.
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        # Decoder dummy inputs are built from the tokenizer.
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3