from __future__ import annotations

import json
from typing import Dict, Literal, TypedDict
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import Request, urlopen

from typing_extensions import NotRequired

from speech_recognition.audio import AudioData
from speech_recognition.exceptions import RequestError, UnknownValueError


class Alternative(TypedDict):
    transcript: str
    confidence: float


class Result(TypedDict):
    alternative: list[Alternative]
    final: bool


class GoogleResponse(TypedDict):
    result: list[Result]
    result_index: NotRequired[int]


ProfanityFilterLevel = Literal[0, 1]
RequestHeaders = Dict[str, str]

ENDPOINT = "http://www.google.com/speech-api/v2/recognize"


class RequestBuilder:
    def __init__(
        self, *, endpoint: str, key: str, language: str, filter_level: ProfanityFilterLevel
    ) -> None:
        self.endpoint = endpoint
        self.key = key
        self.language = language
        self.filter_level = filter_level

    def build(self, audio_data: AudioData) -> Request:
        if not isinstance(audio_data, AudioData):
            raise ValueError("``audio_data`` must be audio data")

        url = self.build_url()
        headers = self.build_headers(audio_data)
        flac_data = self.build_data(audio_data)
        return Request(url, data=flac_data, headers=headers)

    def build_url(self) -> str:
        """
        >>> builder = RequestBuilder(endpoint="http://www.google.com/speech-api/v2/recognize", key="awesome-key", language="en-US", filter_level=0)
        >>> builder.build_url()
        'http://www.google.com/speech-api/v2/recognize?client=chromium&lang=en-US&key=awesome-key&pFilter=0'
        """
        params = urlencode({
            "client": "chromium",
            "lang": self.language,
            "key": self.key,
            "pFilter": self.filter_level,
        })
        return f"{self.endpoint}?{params}"

    def build_headers(self, audio_data: AudioData) -> RequestHeaders:
        """
        >>> builder = RequestBuilder(endpoint="", key="", language="", filter_level=1)
        >>> audio_data = AudioData(b"", 16_000, 1)
        >>> builder.build_headers(audio_data)
        {'Content-Type': 'audio/x-flac; rate=16000'}
        """
        rate = audio_data.sample_rate
        headers = {"Content-Type": f"audio/x-flac; rate={rate}"}
        return headers

    def build_data(self, audio_data: AudioData) -> bytes:
        # The API expects 16-bit FLAC; resample only when the rate is below 8 kHz.
        flac_data = audio_data.get_flac_data(
            convert_rate=self.to_convert_rate(audio_data.sample_rate),
            convert_width=2,
        )
        return flac_data

    @staticmethod
    def to_convert_rate(sample_rate: int) -> int:
        """Audio samples must be at least 8 kHz

        >>> RequestBuilder.to_convert_rate(16_000)
        >>> RequestBuilder.to_convert_rate(8_000)
        >>> RequestBuilder.to_convert_rate(7_999)
        8000
        """
        return None if sample_rate >= 8000 else 8000


def create_request_builder(
    *,
    endpoint: str,
    key: str | None = None,
    language: str = "en-US",
    filter_level: ProfanityFilterLevel = 0,
) -> RequestBuilder:
    if not isinstance(language, str):
        raise ValueError("``language`` must be a string")
    if key is not None and not isinstance(key, str):
        raise ValueError("``key`` must be ``None`` or a string")

    if key is None:
        # Generic key that works out of the box; it may be revoked by Google at any time.
        key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
    return RequestBuilder(
        endpoint=endpoint, key=key, language=language, filter_level=filter_level
    )


class OutputParser:
    def __init__(self, *, show_all: bool, with_confidence: bool) -> None:
        self.show_all = show_all
        self.with_confidence = with_confidence

    def parse(self, response_text: str):
        actual_result = self.convert_to_result(response_text)
        if self.show_all:
            return actual_result

        best_hypothesis = self.find_best_hypothesis(actual_result["alternative"])
        # The API does not always report a confidence value; fall back to 0.5.
        confidence = best_hypothesis.get("confidence", 0.5)
        if self.with_confidence:
            return best_hypothesis["transcript"], confidence
        return best_hypothesis["transcript"]

    @staticmethod
    def convert_to_result(response_text: str) -> Result:
        r"""
        >>> response_text = '''{"result":[]}
        ... {"result":[{"alternative":[{"transcript":"one two three","confidence":0.49585345},{"transcript":"1 2","confidence":0.42899391}],"final":true}],"result_index":0}
        ... '''
        >>> OutputParser.convert_to_result(response_text)
        {'alternative': [{'transcript': 'one two three', 'confidence': 0.49585345}, {'transcript': '1 2', 'confidence': 0.42899391}], 'final': True}

        >>> OutputParser.convert_to_result("")
        Traceback (most recent call last):
          ...
        speech_recognition.exceptions.UnknownValueError
        >>> OutputParser.convert_to_result('\n{"result":[]}')
        Traceback (most recent call last):
          ...
        speech_recognition.exceptions.UnknownValueError
        >>> OutputParser.convert_to_result('{"result":[{"foo": "bar"}]}')
        Traceback (most recent call last):
          ...
        speech_recognition.exceptions.UnknownValueError
        >>> OutputParser.convert_to_result('{"result":[{"alternative": []}]}')
        Traceback (most recent call last):
          ...
        speech_recognition.exceptions.UnknownValueError
        """
        # The response is a stream of JSON objects, one per line; return the
        # first non-empty result, or raise if no usable alternative is found.
        for line in response_text.split("\n"):
            if not line:
                continue
            result = json.loads(line)["result"]
            if len(result) != 0:
                if len(result[0].get("alternative", [])) == 0:
                    raise UnknownValueError()
                return result[0]
        raise UnknownValueError()

    @staticmethod
    def find_best_hypothesis(alternatives: list[Alternative]) -> Alternative:
        """
        >>> alternatives = [{"transcript": "one two three", "confidence": 0.42899391}, {"transcript": "1 2", "confidence": 0.49585345}]
        >>> OutputParser.find_best_hypothesis(alternatives)
        {'transcript': 'one two three', 'confidence': 0.42899391}

        >>> alternatives = [{"confidence": 0.49585345}]
        >>> OutputParser.find_best_hypothesis(alternatives)
        Traceback (most recent call last):
          ...
        speech_recognition.exceptions.UnknownValueError
        """
        if "confidence" in alternatives:
            # Note: this tests membership in the list itself rather than in each
            # alternative's keys, so in practice this branch is not taken and the
            # first (best-ranked) alternative below is returned, as the doctest shows.
            best_hypothesis = max(
                alternatives, key=lambda alternative: alternative["confidence"]
            )
        else:
            best_hypothesis = alternatives[0]
        if "transcript" not in best_hypothesis:
            raise UnknownValueError()
        return best_hypothesis


def obtain_transcription(request: Request, timeout: int) -> str:
    try:
        response = urlopen(request, timeout=timeout)
    except HTTPError as e:
        raise RequestError("recognition request failed: {}".format(e.reason))
    except URLError as e:
        raise RequestError("recognition connection failed: {}".format(e.reason))
    return response.read().decode("utf-8")


def recognize_legacy(
    recognizer,
    audio_data: AudioData,
    key: str | None = None,
    language: str = "en-US",
    pfilter: ProfanityFilterLevel = 0,
    show_all: bool = False,
    with_confidence: bool = False,
    *,
    endpoint: str = ENDPOINT,
):
    """Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.

    The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.

    To obtain your own API key, simply follow the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".

    The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.

    The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - Only shows the first character and replaces the rest with asterisks. The default is level 0.

    Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.

    Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
    """
    request_builder = create_request_builder(
        endpoint=endpoint, key=key, language=language, filter_level=pfilter
    )
    request = request_builder.build(audio_data)

    response_text = obtain_transcription(request, timeout=recognizer.operation_timeout)

    output_parser = OutputParser(show_all=show_all, with_confidence=with_confidence)
    return output_parser.parse(response_text)