"""Tokenization class for Wav2Vec2."""

import json
import os
import sys
import warnings
from dataclasses import dataclass
from itertools import groupby
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...utils import (
    ModelOutput,
    PaddingStrategy,
    TensorType,
    add_end_docstrings,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    logging,
    to_py_obj,
)


logger = logging.get_logger(__name__)


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
    if is_flax_available():
        import jax.numpy as jnp


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/vocab.json"
    },
    "tokenizer_config_file": {
        "facebook/wav2vec2-base-960h": (
            "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/tokenizer_config.json"
        )
    },
}

# Wav2Vec2 has no maximum input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/wav2vec2-base-960h": sys.maxsize}

WAV2VEC2_KWARGS_DOCSTRING = r"""
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
"""

ListOfDict = List[Dict[str, Union[int, str]]]


@dataclass
class Wav2Vec2CTCTokenizerOutput(ModelOutput):
    """
    Output type of [`Wav2Vec2CTCTokenizer`], with transcription.

    Args:
        text (list of `str` or `str`):
            Decoded logits in text form. Usually the speech transcription.
        char_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):
            Offsets of the decoded characters. In combination with sampling rate and model downsampling rate, char
            offsets can be used to compute time stamps for each character.
        word_offsets (list of `List[Dict[str, Union[int, str]]]` or `List[Dict[str, Union[int, str]]]`):
            Offsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets
            can be used to compute time stamps for each word.
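
    Example (illustrative only; the offset values below are invented, not produced by a real model):

    ```python
    >>> # a single `word_offsets` entry has the form:
    >>> # {"word": "HELLO", "start_offset": 12, "end_offset": 21}
    >>> # offsets count model frames; multiply by `model.config.inputs_to_logits_ratio / sampling_rate` for seconds
    ```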
    """

    text: Union[List[str], str]
    char_offsets: Union[List[ListOfDict], ListOfDict] = None
    word_offsets: Union[List[ListOfDict], ListOfDict] = None


class Wav2Vec2CTCTokenizer(PreTrainedTokenizer):
    """
    Constructs a Wav2Vec2CTC tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
    the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        word_delimiter_token (`str`, *optional*, defaults to `"|"`):
            The token used for defining the end of a word.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to accept lowercase input and lowercase the output when decoding.
        target_lang (`str`, *optional*):
            A target language the tokenizer should set by default. `target_lang` has to be defined for multi-lingual,
            nested vocabulary such as [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all).

        **kwargs
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
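
    Example (a minimal sketch; it assumes the `facebook/wav2vec2-base-960h` vocabulary is available and passes
    `group_tokens=False` so that repeated characters are not collapsed by the CTC-style grouping in `decode`):

    ```python
    >>> from transformers import Wav2Vec2CTCTokenizer

    >>> tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
    >>> ids = tokenizer("HELLO WORLD").input_ids
    >>> tokenizer.decode(ids, group_tokens=False)
    'HELLO WORLD'
    ```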
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        word_delimiter_token="|",
        replace_word_delimiter_char=" ",
        do_lower_case=False,
        target_lang=None,
        **kwargs,
    ):
        self._word_delimiter_token = word_delimiter_token

        self.do_lower_case = do_lower_case
        self.replace_word_delimiter_char = replace_word_delimiter_char
        self.target_lang = target_lang

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)

        # if a target language is set, the vocab must be a nested dict
        # with one vocabulary per target language
        if target_lang is not None:
            self.encoder = self.vocab[target_lang]
        else:
            self.encoder = self.vocab

        self.decoder = {v: k for k, v in self.encoder.items()}

        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            word_delimiter_token=word_delimiter_token,
            replace_word_delimiter_char=replace_word_delimiter_char,
            target_lang=target_lang,
            **kwargs,
        )

        # make sure that tokens made of several characters are not split at tokenization
        unique_no_split_tokens = []
        for token in self.encoder.keys():
            if len(token) > 1:
                unique_no_split_tokens.append(AddedToken(token, rstrip=True, lstrip=True, normalized=False))

        self._add_tokens(unique_no_split_tokens)

    def set_target_lang(self, target_lang: str):
        """
        Set the target language of a nested multi-lingual dictionary.
        """
        if self.vocab == self.encoder:
            raise ValueError(f"{self.vocab} is not a multi-lingual, nested tokenizer. Cannot set target language.")

        if target_lang not in self.vocab:
            raise ValueError(f"{target_lang} does not exist. Choose one of {', '.join(self.vocab.keys())}.")

        self.target_lang = target_lang
        self.init_kwargs["target_lang"] = target_lang
        self.encoder = self.vocab[target_lang]
        self.decoder = {v: k for k, v in self.encoder.items()}

        # make sure that tokens made of several characters are not split at tokenization
        self.add_tokens([token for token in self.encoder.keys() if len(token) > 1])

    @property
    def word_delimiter_token(self) -> str:
        """
        `str`: Word delimiter token. Log an error if used while not having been set.
        """
        if self._word_delimiter_token is None and self.verbose:
            logger.error("Using word_delimiter_token, but it is not set yet.")
            return None
        return str(self._word_delimiter_token)

    @property
    def word_delimiter_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been
        set.
        """
        if self._word_delimiter_token is None:
            return None
        return self.convert_tokens_to_ids(self.word_delimiter_token)

    @word_delimiter_token.setter
    def word_delimiter_token(self, value):
        self._word_delimiter_token = value

    @word_delimiter_token_id.setter
    def word_delimiter_token_id(self, value):
        self._word_delimiter_token = self.convert_tokens_to_ids(value)

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        vocab = dict(self.encoder)
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # Overwritten to never strip!
        to_add = []
        for token in new_tokens:
            if isinstance(token, str):
                to_add.append(AddedToken(token, rstrip=False, lstrip=False, normalized=False))
            else:
                to_add.append(token)

        return super()._add_tokens(to_add, special_tokens)

    def _tokenize(self, text, **kwargs):
        """
        Converts a string into a sequence of tokens (string), using the tokenizer.
        """
        if self.do_lower_case:
            text = text.upper()

        return list(text.replace(" ", self.word_delimiter_token))

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(
        self,
        tokens: List[str],
        group_tokens: bool = True,
        spaces_between_special_tokens: bool = False,
        output_char_offsets: bool = False,
        output_word_offsets: bool = False,
    ) -> Dict[str, Union[str, float]]:
        """
        Converts connectionist-temporal-classification (CTC) output tokens into a single string.
        """
        if len(tokens) == 0:
            return {"text": "", "char_offsets": [], "word_offsets": []}

        # group same tokens into non-repeating tokens in CTC style decoding
        if group_tokens:
            chars, char_repetitions = zip(*((token, len(list(group_iter))) for token, group_iter in groupby(tokens)))
        else:
            chars = tokens
            char_repetitions = len(tokens) * [1]

        # filter self.pad_token which is used as CTC-blank token
        processed_chars = list(filter(lambda char: char != self.pad_token, chars))

        # replace delimiter token
        processed_chars = [
            self.replace_word_delimiter_char if char == self.word_delimiter_token else char for char in processed_chars
        ]

        # retrieve offsets
        char_offsets = word_offsets = None
        if output_char_offsets or output_word_offsets:
            char_offsets = self._compute_offsets(char_repetitions, chars, ctc_token=self.pad_token)

            if len(char_offsets) != len(processed_chars):
                raise ValueError(
                    f"`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars}"
                    " have to be of the same length, but are: "
                    f"`len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`: {len(processed_chars)}"
                )

            # set tokens to correct processed token
            for i, char in enumerate(processed_chars):
                char_offsets[i]["char"] = char

            # retrieve word offsets from character offsets
            word_offsets = None
            if output_word_offsets:
                word_offsets = self._get_word_offsets(char_offsets, self.replace_word_delimiter_char)

            # don't output chars if not set to True
            if not output_char_offsets:
                char_offsets = None

        # join to string
        join_char = " " if spaces_between_special_tokens else ""
        string = join_char.join(processed_chars).strip()

        if self.do_lower_case:
            string = string.lower()

        return {"text": string, "char_offsets": char_offsets, "word_offsets": word_offsets}

    @staticmethod
    def _compute_offsets(
        char_repetitions: List[int], chars: List[str], ctc_token: int
    ) -> List[Dict[str, Union[str, int]]]:
        end_indices = np.asarray(char_repetitions).cumsum()
        start_indices = np.concatenate(([0], end_indices[:-1]))

        offsets = [
            {"char": t, "start_offset": s, "end_offset": e} for t, s, e in zip(chars, start_indices, end_indices)
        ]

        # filter out CTC token
        offsets = list(filter(lambda offsets: offsets["char"] != ctc_token, offsets))
        return offsets

    @staticmethod
    def _get_word_offsets(
        offsets: Dict[str, Union[str, float]], word_delimiter_char: str = " "
    ) -> Dict[str, Union[str, float]]:
        word_offsets = []

        last_state = "SPACE"
        word = ""
        start_offset = 0
        end_offset = 0
        for i, offset in enumerate(offsets):
            char = offset["char"]
            state = "SPACE" if char == word_delimiter_char else "WORD"

            if state == last_state:
                # If we are in the same state, we can continue from the previous entry
                end_offset = offset["end_offset"]
                word += char
            else:
                if state == "SPACE":
                    # Finishing a word
                    word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
                else:
                    # Starting a new word
                    start_offset = offset["start_offset"]
                    end_offset = offset["end_offset"]
                    word = char

            last_state = state

        if last_state == "WORD":
            word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})

        return word_offsets

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        if is_split_into_words:
            text = " " + text
        return (text, kwargs)

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        group_tokens: bool = True,
        spaces_between_special_tokens: bool = False,
        output_word_offsets: Optional[bool] = False,
        output_char_offsets: Optional[bool] = False,
    ) -> str:
        """
        special _decode function is needed for Wav2Vec2Tokenizer because added tokens should be treated exactly the
        same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on
        the whole token list and not individually on added tokens
        """
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        result = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            result.append(token)

        string_output = self.convert_tokens_to_string(
            result,
            group_tokens=group_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
            output_word_offsets=output_word_offsets,
            output_char_offsets=output_char_offsets,
        )

        text = string_output["text"]

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            text = self.clean_up_tokenization(text)

        if output_word_offsets or output_char_offsets:
            return Wav2Vec2CTCTokenizerOutput(
                text=text,
                char_offsets=string_output["char_offsets"],
                word_offsets=string_output["word_offsets"],
            )
        else:
            return text

    # overwritten from `tokenization_utils_base.py` because tokenizer can output
    # `ModelOutput` which should not be a list for batched output and
    # because we need docs for `output_char_offsets` here
    def batch_decode(
        self,
        sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        output_char_offsets: bool = False,
        output_word_offsets: bool = False,
        **kwargs,
    ) -> List[str]:
        """
        Convert a list of lists of token ids into a list of strings by calling decode.

        Args:
            sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces.
            output_char_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output character offsets. Character offsets can be used in combination with the
                sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.

                <Tip>

                Please take a look at the Example of [`~Wav2Vec2CTCTokenizer.decode`] to better understand how to make
                use of `output_char_offsets`. [`~Wav2Vec2CTCTokenizer.batch_decode`] works the same way with batched
                output.

                </Tip>

            output_word_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate
                and model downsampling rate to compute the time-stamps of transcribed words.

                <Tip>

                Please take a look at the Example of [`~Wav2Vec2CTCTokenizer.decode`] to better understand how to make
                use of `output_word_offsets`. [`~Wav2Vec2CTCTokenizer.batch_decode`] works the same way with batched
                output.

                </Tip>

            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `List[str]` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded
            sentences. Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when
            `output_char_offsets == True` or `output_word_offsets == True`.
        """
        batch_decoded = [
            self.decode(
                seq,
                skip_special_tokens=skip_special_tokens,
                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                output_char_offsets=output_char_offsets,
                output_word_offsets=output_word_offsets,
                **kwargs,
            )
            for seq in sequences
        ]
        if output_char_offsets or output_word_offsets:
            # transform list of dicts to dict of lists
            return Wav2Vec2CTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]})

        return batch_decoded

    # overwritten from `tokenization_utils_base.py` because we need docs for `output_char_offsets`
    # and `output_word_offsets` here
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        output_char_offsets: bool = False,
        output_word_offsets: bool = False,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces.
            output_char_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output character offsets. Character offsets can be used in combination with the
                sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.

                <Tip>

                Please take a look at the example below to better understand how to make use of `output_char_offsets`.

                </Tip>

            output_word_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate
                and model downsampling rate to compute the time-stamps of transcribed words.

                <Tip>

                Please take a look at the example below to better understand how to make use of `output_word_offsets`.

                </Tip>

            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded
            sentences. Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when
            `output_char_offsets == True` or `output_word_offsets == True`.

        Example:

        ```python
        >>> # Let's see how to retrieve time steps for a model
        >>> from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC
        >>> from datasets import load_dataset
        >>> import datasets
        >>> import torch

        >>> # import model, feature extractor, tokenizer
        >>> model = AutoModelForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

        >>> # load first sample of English common_voice
        >>> dataset = load_dataset("common_voice", "en", split="train", streaming=True)
        >>> dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        >>> dataset_iter = iter(dataset)
        >>> sample = next(dataset_iter)

        >>> # forward sample through model to get greedily predicted transcription ids
        >>> input_values = feature_extractor(sample["audio"]["array"], return_tensors="pt").input_values
        >>> logits = model(input_values).logits[0]
        >>> pred_ids = torch.argmax(logits, axis=-1)

        >>> # retrieve word stamps (analogous commands for `output_char_offsets`)
        >>> outputs = tokenizer.decode(pred_ids, output_word_offsets=True)
        >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate
        >>> time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate

        >>> word_offsets = [
        ...     {
        ...         "word": d["word"],
        ...         "start_time": round(d["start_offset"] * time_offset, 2),
        ...         "end_time": round(d["end_offset"] * time_offset, 2),
        ...     }
        ...     for d in outputs.word_offsets
        ... ]
        >>> # compare word offsets with audio `common_voice_en_100038.mp3` online on the dataset viewer:
        >>> # https://huggingface.co/datasets/common_voice/viewer/en/train
        >>> word_offsets[:3]
        [{'word': 'WHY', 'start_time': 1.42, 'end_time': 1.54}, {'word': 'DOES', 'start_time': 1.64, 'end_time': 1.9}, {'word': 'MILISANDRA', 'start_time': 2.26, 'end_time': 2.9}]
        ```"""
        # Convert inputs to python lists
        token_ids = to_py_obj(token_ids)

        return self._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            output_char_offsets=output_char_offsets,
            output_word_offsets=output_word_offsets,
            **kwargs,
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)


class Wav2Vec2Tokenizer(PreTrainedTokenizer):
    """
    Constructs a Wav2Vec2 tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
    the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        word_delimiter_token (`str`, *optional*, defaults to `"|"`):
            The token used for defining the end of a word.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the output when decoding.
        do_normalize (`bool`, *optional*, defaults to `False`):
            Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
            improve the performance for some models, *e.g.*,
            [wav2vec2-lv60](https://huggingface.co/models?search=lv60).
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether or not [`~Wav2Vec2Tokenizer.__call__`] should return `attention_mask`.

            <Tip>

            Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as
            [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using
            `attention_mask`. For such models, `input_values` should simply be padded with 0 and no `attention_mask`
            should be passed.

            For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as
            [wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should be
            passed for batched inference.

            </Tip>

        **kwargs
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
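
    Example (a minimal sketch; note that `Wav2Vec2Tokenizer.__init__` emits a `FutureWarning` because the class is
    deprecated in favor of [`Wav2Vec2Processor`] and [`Wav2Vec2CTCTokenizer`]):

    ```python
    >>> import numpy as np
    >>> from transformers import Wav2Vec2Tokenizer

    >>> tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
    >>> speech = [np.zeros(16_000), np.zeros(8_000)]  # two mono waveforms of different lengths
    >>> inputs = tokenizer(speech, padding=True, return_tensors="np")
    >>> inputs.input_values.shape
    (2, 16000)
    ```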
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = {
        "vocab_file": {
            "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/vocab.json"
        },
        "tokenizer_config_file": {
            "facebook/wav2vec2-base-960h": (
                "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/tokenizer.json"
            ),
        },
    }
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        word_delimiter_token="|",
        do_lower_case=False,
        do_normalize=False,
        return_attention_mask=False,
        **kwargs,
    ):
        warnings.warn(
            "The class `Wav2Vec2Tokenizer` is deprecated and will be removed in version 5 of Transformers. Please use"
            " `Wav2Vec2Processor` or `Wav2Vec2CTCTokenizer` instead.",
            FutureWarning,
        )

        self._word_delimiter_token = word_delimiter_token

        self.do_lower_case = do_lower_case
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)

        self.decoder = {v: k for k, v in self.encoder.items()}

        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            do_normalize=do_normalize,
            return_attention_mask=return_attention_mask,
            word_delimiter_token=word_delimiter_token,
            **kwargs,
        )

    @property
    def word_delimiter_token(self) -> str:
        """
        `str`: Word delimiter token. Log an error if used while not having been set.
        """
        if self._word_delimiter_token is None and self.verbose:
            logger.error("Using word_delimiter_token, but it is not set yet.")
            return None
        return str(self._word_delimiter_token)

    @property
    def word_delimiter_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been
        set.
        """
        if self._word_delimiter_token is None:
            return None
        return self.convert_tokens_to_ids(self.word_delimiter_token)

    @word_delimiter_token.setter
    def word_delimiter_token(self, value):
        self._word_delimiter_token = value

    @word_delimiter_token_id.setter
    def word_delimiter_token_id(self, value):
        self._word_delimiter_token = self.convert_tokens_to_ids(value)

    @add_end_docstrings(WAV2VEC2_KWARGS_DOCSTRING)
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences.

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of lists of float values. Must be mono channel audio, not
                stereo, i.e. a single float per timestep.
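
        Example (illustrative only; assumes `tokenizer` was created with `Wav2Vec2Tokenizer.from_pretrained` and the
        float values are made up):

        ```python
        >>> batch = [[0.1, 0.2, 0.3], [0.4, 0.5]]  # two mono sequences as lists of floats
        >>> out = tokenizer(batch, padding="longest")
        >>> [len(x) for x in out.input_values]
        [3, 3]
        ```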
        """
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")

        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        # make sure input is in list format
        if is_batched and not isinstance(raw_speech[0], np.ndarray):
            raw_speech = [np.asarray(speech) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # zero-mean and unit-variance normalization
        if self.do_normalize:
            raw_speech = [(x - np.mean(x)) / np.sqrt(np.var(x) + 1e-5) for x in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchEncoding({"input_values": raw_speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=self.return_attention_mask,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return padded_inputs

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Converts connectionist-temporal-classification (CTC) output tokens into a single string.
        """
        # group same tokens into non-repeating tokens in CTC style decoding
        grouped_tokens = [token_group[0] for token_group in groupby(tokens)]

        # filter self.pad_token which is used as CTC-blank token
        filtered_tokens = list(filter(lambda token: token != self.pad_token, grouped_tokens))

        # replace delimiter token
        string = "".join([" " if token == self.word_delimiter_token else token for token in filtered_tokens]).strip()

        if self.do_lower_case:
            string = string.lower()

        return string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        **kwargs,
    ) -> str:
        """
        special _decode function is needed for Wav2Vec2Tokenizer because added tokens should be treated exactly the
        same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on
        the whole token list and not individually on added tokens
        """
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        result = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            result.append(token)

        text = self.convert_tokens_to_string(result)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)