U
    9%eK                     @   s  d dl mZ d dlmZmZmZmZ d dlZd dl	Z	ddl
mZ ddlmZ ddlmZmZmZ dd	lmZ dd
lmZmZmZ erd dlmZ ddlmZ ddlmZ eeZ e rd dl!Z!ddl"m#Z#m$Z$ dd Z%dddZ&dd Z'dd Z(G dd deZ)dd Z*dS )    )defaultdict)TYPE_CHECKINGDictOptionalUnionN   )	ModelCard)PreTrainedTokenizer)is_torch_availableis_torchaudio_availablelogging   )ffmpeg_read)ArgumentHandlerChunkPipelineinfer_framework_load_model)BeamSearchDecoderCTC)SequenceFeatureExtractor)PreTrainedModel)MODEL_FOR_CTC_MAPPING_NAMES(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMESc                 C   sd   g }| D ]V\}}}t t|| }t t|| | }t t|| | }|||f}|| q|S )z
    Rescales the stride values from audio space to tokens/logits space.

    (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.
    )introundappend)strideratioZnew_stridesZinput_nleftrightZtoken_nZ
new_stride r   r/var/www/html/Darija-Ai-API/env/lib/python3.8/site-packages/transformers/pipelines/automatic_speech_recognition.pyrescale_stride)   s    	
r    Tc                 c   s4  | j d }|| | }td||D ]
}	|	| }
| |	|
 }|||jdd}|d k	r`|j|d}|	dkrldn|}|dkr|
|kn|
|k}|rdn|}|j d }|||f}d|kr|d j d }nd|kr|d j d }||j d kr|r|| }t|g|d }|j d |kr$||d|V  |r" q0q"d S )	Nr   ptsampling_rateZreturn_tensorsdtypeinput_featuresinput_values)is_lastr   )shaperanger#   tor    )inputsfeature_extractor	chunk_lenstride_leftstride_rightrescaler%   Z
inputs_lenstepZchunk_start_idxZchunk_end_idxchunk	processedZ_stride_leftr)   Z_stride_rightr   Zprocessed_lenr   r   r   r   
chunk_iter=   s0    


r6   c           
         s   t | }t |  fddt|d D }d}t|D ]R}t D ]D}| | || krB|| | d }|||d  |d < ||krB|}qBq6t|}|dkrt||kd d | nd}|dkrt||kd d | nd}	||	|fS )Nc                    s   g | ]}d g d  qS )r   r   r   ).0_Zseq_len_rightr   r   
<listcomp>]   s     z6_fast_find_longest_common_sequence.<locals>.<listcomp>r   r   r'   )lenr+   nparrayZargwhere)
Zsequence_leftZsequence_rightZseq_len_leftcounterlongestijZprevious_counter
index_leftindex_rightr   r9   r   "_fast_find_longest_common_sequenceZ   s    
&&rD   c              
      s    fdd| d d   D }| dd  D ]} fdd|d   D }d}d}tdt|d D ]\}|d }tt|| d  t|d | k}	|	| | }
|	dkrb|
|krb|}|
}qb|||d   q*t|S )Nc                    s   g | ]}| j kr|qS r   Zall_special_idsr7   Ztok_id	tokenizerr   r   r:   u   s     
 z1_find_longest_common_sequence.<locals>.<listcomp>r   r   c                    s   g | ]}| j kr|qS r   rE   rF   rG   r   r   r:   w   s     
 g        g     @)tolistr+   r;   r<   sumr=   extend)	sequencesrH   sequenceZnew_seqZnew_sequenceindexZmax_r@   ZepsmatchesZmatchingr   rG   r   _find_longest_common_sequencen   s    ,rP   c                       s   e Zd ZdZddedef ee eedef  ee ee ee	ee
df eeed	f  ed
ddZeejeef d fddZdddZdddZdddZdee dddZ  ZS )"AutomaticSpeechRecognitionPipelinea}  
    Pipeline that aims at extracting spoken text contained within some audio.

    The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for
    to support multiple audio formats

    Example:

    ```python
    >>> from transformers import pipeline

    >>> transcriber = pipeline(model="openai/whisper-base")
    >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'}
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    Arguments:
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        feature_extractor ([`SequenceFeatureExtractor`]):
            The feature extractor that will be used by the pipeline to encode waveform for the model.
        chunk_length_s (`float`, *optional*, defaults to 0):
            The input length for in each chunk. If `chunk_length_s = 0` then chunking is disabled (default).

            <Tip>

            For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking
            blog post](https://huggingface.co/blog/asr-chunking).

            </Tip>

        stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
            The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
            the model to *see* more context and infer letters better than without this context but the pipeline
            discards the stride bits at the end to make the final reconstitution as perfect as possible.

            <Tip>

            For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking
            blog post](https://huggingface.co/blog/asr-chunking).

            </Tip>

        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed. If no framework is specified, will default to the one currently installed. If no framework is
            specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if
            no model is provided.
        device (Union[`int`, `torch.device`], *optional*):
            Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the
            model on the associated CUDA device id.
        decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*):
            [PyCTCDecode's
            BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180)
            can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information.

    N Fr   r   r   ztorch.deviceztorch.dtype)modelr.   rH   decoder	modelcard	frameworktaskargs_parserdevicetorch_dtypebinary_outputc                 K   sf  |d krt ||jd\}}|| _|| _|| _|| _|| _|| _t| jdd }|d k	rd|	d k	rdt	d| jdkrvt	d|	d k	rt
|	tr|	dk s| j|	 |	d kr|d k	rtt| }	nd}	t r0| jdkr0t
|	tjr|	| _nDt
|	trt|	| _n*|	dk rtd	| _ntd
|	 | _n|	d k	r>|	nd| _|
| _|| _| jjj}|d k	r||kr| jj|| | j r| jjjf || d| _|dd | _|dd | _| jjjdkrd| _ nP| jj!j"t# krd| _ n4|j$r(|j$%dr(|d k	r(|| _&d| _ nd| _ | j'f |\| _(| _)| _*t#+ }|t, | -| d S )N)confighf_device_mapzThe model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please discard the `device` argument when creating your pipeline object.tfzDThe AutomaticSpeechRecognitionPipeline is only available in PyTorch.r   r'   r!   cpuzcuda:Z
batch_sizeZnum_workersZwhisperseq2seq_whisperseq2seqZWithLMctc_with_lmctc).r   r\   rW   rS   rH   r.   rU   rV   getattr
ValueError
isinstancer   r,   nextitervaluesr
   torchrY   strrZ   r[   task_specific_paramsupdategetZcan_generateZgeneration_configZ
call_countpopZ_batch_sizeZ_num_workersZ
model_typetype	__class____name__r   Z_processor_classendswithrT   _sanitize_parametersZ_preprocess_paramsZ_forward_paramsZ_postprocess_paramscopyr   Zcheck_model_type)selfrS   r.   rH   rT   rU   rV   rW   rX   rY   rZ   r[   kwargsr]   rl   mappingr   r   r   __init__   sr    




z+AutomaticSpeechRecognitionPipeline.__init__)r-   c                    s   t  j|f|S )a  
        Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`]
        documentation for more information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs is either :
                    - `str` that is either the filename of a local audio file, or a public URL address to download the
                      audio file. The file will be read at the correct sampling rate to get the waveform using
                      *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                      same way.
                    - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
                        Raw audio at the correct sampling rate (no further check will be done)
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw":
                      np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to
                      treat the first `left` samples and last `right` samples to be ignored in decoding (but used at
                      inference to provide more context to the model). Only use `stride` with CTC models.
            return_timestamps (*optional*, `str` or `bool`):
                Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for
                other sequence-to-sequence models.

                For CTC models, timestamps can take one of two formats:
                    - `"char"`: the pipeline will return timestamps along the text for every character in the text. For
                        instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7,
                        0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before
                        `0.6` seconds.
                    - `"word"`: the pipeline will return timestamps along the text for every word in the text. For
                        instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp":
                        (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and
                        before `0.9` seconds.

                For the Whisper model, timestamps can take one of two formats:
                    - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted
                        through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps
                        by inspecting the cross-attention weights.
                    - `True`: the pipeline will return timestamps along the text for *segments* of words in the text.
                        For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the
                        model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds.
                        Note that a segment of text refers to a sequence of one or more words, rather than individual
                        words as with word-level timestamps.
            generate_kwargs (`dict`, *optional*):
                The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
                complete overview of generate, check the [following
                guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation).
            max_new_tokens (`int`, *optional*):
                The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.

        Return:
            `Dict`: A dictionary with the following keys:
                - **text** (`str`): The recognized text.
                - **chunks** (*optional(, `List[Dict]`)
                    When using `return_timestamps`, the `chunks` will become a list containing all the various text
                    chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text":
                    "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
                    `"".join(chunk["text"] for chunk in output["chunks"])`.
        )super__call__)rv   r-   rw   rq   r   r   r{   &  s    ?z+AutomaticSpeechRecognitionPipeline.__call__c	                 C   sL  i }	|d k	r,| j dkr$|s$td ||	d< |d k	r<||	d< tt}
|d k	rX||
d d< |d k	r|d k	rxd|krxtd|
d | i }|d k	r||d< |d k	r| j dkr|rtd	| j d
kr|dkrtd| j dkr|dkrtd| j dkr|dkrtd||
d< ||d< |d k	rB| j dkr:td||d< |	|
|fS )Nra   a  Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily be entirely accurate and will have caveats. More information: https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(..., ignore_warning=True)chunk_length_sstride_length_sgenerate_kwargsmax_new_tokenszp`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use only 1 versiondecoder_kwargszEWe cannot return_timestamps yet on non-CTC models apart from Whisper!rb   wordzRCTC with LM can only predict word level timestamps, set `return_timestamps='word'`rc   )charr   zCTC can either predict character level timestamps, or word level timestamps.Set `return_timestamps='char'` or `return_timestamps='word'` as required.r`   r   zWhisper cannot return `char` timestamps, only word level or segment level timestamps. Use `return_timestamps='word'` or `return_timestamps=True` respectively.return_timestampsz)Only Whisper can return language for now.return_language)rp   loggerwarningr   dictre   rm   )rv   r}   r~   Zignore_warningr   r   r   r   r   Zpreprocess_paramsZforward_paramsZpostprocess_paramsr   r   r   rt   g  sR    

z7AutomaticSpeechRecognitionPipeline._sanitize_parametersr   c              	   c   s  t |trJ|ds|dr,t|j}nt|d}| }W 5 Q R X t |trbt	|| j
j}d }i }t |tr|dd }d|krd|ksd|kstd|dd }|d kr|d	d  |dd }|d}|}|}|| j
jkr6t rd
dlm}	 ntd|	t||| j
j }| j
j| }
nd}
|d k	r|d
 |d  |jd
 krhtd|jd
 tt|d
 |
 tt|d |
 f}t |tjstdt| dt|jdkrtd|r|d kr|d }t |ttfr||g}t| j j!dd}tt|| j
j | | }tt|d
 | j
j | | }tt|d | j
j | | }||| k rtd| jdk}t"|| j
||||| j#D ]}|V  qnd| j
|| j
jdd}| j#d k	r|j$| j#d}|d k	r
| jdkrtd||d< ddi||V  d S )Nzhttp://zhttps://rbr   r#   rawr=   zWhen passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a "raw" key containing the numpy array representing the audio and a "sampling_rate" key, containing the sampling_rate associated with that arraypathr   )
functionalztorchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. The torchaudio package can be installed through: `pip install torchaudio`.r   zStride is too large for inputz)We expect a numpy ndarray as input, got ``zMWe expect a single channel audio input for AutomaticSpeechRecognitionPipeline   inputs_to_logits_ratioz.Chunk length must be superior to stride lengthr`   r!   r"   r$   ra   z8Stride is only usable with CTC models, try removing it !r)   T)%rf   rk   
startswithrequestsrn   contentopenreadbytesr   r.   r#   r   ro   re   r   Z
torchaudior   ImportErrorZresamplerj   Z
from_numpynumpyr*   r   r   r<   ndarrayrp   r;   floatrd   rS   r\   r6   rZ   r,   )rv   r-   r}   r~   fr   extraZ_inputsZin_sampling_rateFr   Zalign_tor/   r0   r1   r2   itemr5   r   r   r   
preprocess  s    


  
0
  
        
z-AutomaticSpeechRecognitionPipeline.preprocessc                 C   s  |d kri }| dd }| dd }| d}| jdkr2| j }d|krX| d}n&d|krl| d}ntd|  |r| jdkr||d	< |d
krd|d< |d k	r|d | jj |d< | jjf |||d|d|}	|d
kr
| jdkr
|	d |	d d}
nd|	i}
| jdkr|d k	r||
d< n| d}| j||d}|j	}| jdkrfd|i}
nd|j
ddi}
|d k	rd| jjj }t|trt|g|d |
d< nt|||
d< |}d|i|
|S )Nattention_maskr   r)      ra   r`   r&   r(   zhSeq2Seq speech recognition model requires either a `input_features` or `input_values` key, but only has r`   r   r   TZreturn_token_timestampsr   Z
num_frames)r   )Zencoder_outputsr   rL   token_timestamps)tokensr   r   )r(   r   rb   logitsr'   )dimr   )ro   rp   rS   Zget_encoderre   keysr.   Z
hop_lengthgenerater   Zargmaxr\   r   rf   tupler    )rv   Zmodel_inputsr   r   r   r   r)   encoderr-   r   outr(   outputsr   r   r   r   r   r   _forward  sZ    







z+AutomaticSpeechRecognitionPipeline._forward)r   c           $      C   s  i }g }| j dkrdnd}d }|D ]^}	|	|  }
|	dd }|d k	rv| j dkrv|\}}}|| }|
d d ||f }
||
 q"|r| j dkrt|| j}
n| j dkr$| jj| jj	j
 }| jj}|D ]@}d|kr|d \}}}|| }|| }|| }|||f|d< q| jj||||d\}}ntj|d	d
}
|
d}
| j dkr|d krVi }| jj|
f|}|d d }|r|d d }g }|D ] \}\}}||||d qn^| j dkr| j dk}| jj|
|d}|r| jj|
|ddd }|dkr| j|| jj}|r| j dkrg }|D ]Z}|d | jj	j }|| jj }|d | jj	j } | | jj } ||| || fd q(||d< tt}!|D ]d}|dd  |dd  |dd  |dd  |dd  | D ]\}"}#|!|" |# qqd|i||!S )Nrb   r   r   r   >   rb   rc   ra   r`   )r   r   time_precisionr   )Zaxisr   r   )r   start_offset
end_offsetrc   )skip_special_tokensT)r   Zoutput_char_offsetsZchar_offsetsr   r   r   r   )text	timestampchunksr)   r   r   )rp   r   rn   r   rP   rH   r.   chunk_lengthrS   r\   max_source_positionsr#   Z_decode_asrr<   ZconcatenatesqueezerT   Zdecode_beamsdecodeZ_get_word_offsetsZreplace_word_delimiter_charr   r   listro   items)$rv   Zmodel_outputsr   r   r   optionalZfinal_itemskeyr   r   r   Ztotal_nr   r   Zright_nr   r#   outputr/   r0   r1   r   ZbeamsZchunk_offsetoffsetsr   r   r   r   r   r   startstopr   kvr   r   r   postprocessM  s    



  
z.AutomaticSpeechRecognitionPipeline.postprocess)
NNNNNrR   NNNF)NNNNNNNN)r   N)FN)NNN)rr   
__module____qualname____doc__r   rk   r   r	   r   r   r   boolry   r<   r   r   r{   rt   r   r   r   r   __classcell__r   r   r|   r   rQ      sV   C          

`C        
@
d
C     rQ   c           (      C   sP  | dd }g }|j| }d}t| D ]\}}	|	\}
}t|
trNt|
}
|\}}}|
d}
||
krt|
|kd d nd}|
|d }
|
|k}|dkr*t	|dkr*t|dd |dd @ d d }t|d d }||krt
||n|}||| 8 }t||j | }t||j | }t|
| || kd }|jd dkr*|d dkrx||d d  n|d }d}g }tt|D ]P\}}|dd }|d || | k r|dkr qt|dkrt|
d| |\}}}|dkr||kr|}|}t|
|d d |kd d d }|d | }|dkr|t|krt|
|d | d|d }|d |d< n\|dkr|
|d | }|dkr|d|d  n|d g} t|d| }|d  |7  < qt|dkr*||t|| d < |dt||  }|
|d }
|
|k}t|dd |dd @ d d }t	|dkrt|d d }||krt
||d n|}t|dkrd}!|D ]l}"|dks|!dkr|d d n|
d }#|
|!|" }$|$d |$d  }%|#|$d< |#|% |$d< |
|$ |"}!q||7 }q(g }&tt|D ]}'|&||'  7 }&q4|&S )a  
    Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since
    `WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only
    iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is
    processed. We need to make sure to offset the timestamps tokens by the `time` in order for the tokenizer to
    properly compute the final `offset`.
    z<|notimestamps|>r   r   Nr'   )Zconvert_tokens_to_idsr   	enumeraterf   r   r<   r=   r   whererJ   r   r   r#   r*   reversedr;   rD   insertr+   rI   )(rL   rH   r.   r   Ztimestamp_beginr   r   timeZseq_idxr   rM   r   r/   r0   r1   Z	begin_idxZtimestamp_tokensZconsecutiveZlast_timestampoffsetZoverlap_timeZrelevant_timestamp
best_matchZsliced_sequenceidxZprevious_sequenceZprevious_tokensrB   rC   Zmatch_lengthZbest_idxZend_of_curr_sequence_idxZprevious_sliceZ
last_sliceZcurrent_sliceZactual_offsetZsliced_tokensdurationresultr@   r   r   r   _find_timestamp_sequence  s    	




"&$ 
 
$  
"&(

r   )TN)+collectionsr   typingr   r   r   r   r   r<   r   rU   r   Ztokenization_utilsr	   utilsr
   r   r   Zaudio_utilsr   baser   r   r   Zpyctcdecoder   Z!feature_extraction_sequence_utilsr   Zmodeling_utilsr   Z
get_loggerrr   r   rj   Zmodels.auto.modeling_autor   r   r    r6   rD   rP   rQ   r   r   r   r   r   <module>   s4   

    '