"""Library for performing speech recognition, with support for several engines and APIs, online and offline."""

from __future__ import annotations

import aifc
import audioop
import base64
import collections
import hashlib
import hmac
import io
import json
import math
import os
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import wave
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import Request, urlopen

try:
    import requests
except (ModuleNotFoundError, ImportError):
    pass

from .audio import AudioData, get_flac_converter
from .exceptions import (
    RequestError,
    TranscriptionFailed,
    TranscriptionNotReady,
    UnknownValueError,
    WaitTimeoutError,
)

__author__ = "Anthony Zhang (Uberi)"
__version__ = "3.10.4"
__license__ = "BSD"


class AudioSource(object):
    def __init__(self):
        raise NotImplementedError("this is an abstract class")

    def __enter__(self):
        raise NotImplementedError("this is an abstract class")

    def __exit__(self, exc_type, exc_value, traceback):
        raise NotImplementedError("this is an abstract class")
class Microphone(AudioSource):
    """
    Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.

    This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later installed.

    If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.

    A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details.

    The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings.

    Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high.

    Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also make detection less sensitive. This value, generally, should be left at its default.
    """
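    # A minimal device-selection sketch; it assumes at least one input device whose
    # name contains "USB" (a hypothetical substring - adjust it for your hardware):
    #
    #     for index, name in enumerate(Microphone.list_microphone_names()):
    #         if name is not None and "USB" in name:
    #             mic = Microphone(device_index=index)
    #             break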
    def __init__(self, device_index=None, sample_rate=None, chunk_size=1024):
        assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
        assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer"
        assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer"

        # set up PyAudio
        self.pyaudio_module = self.get_pyaudio()
        audio = self.pyaudio_module.PyAudio()
        try:
            count = audio.get_device_count()  # obtain device count
            if device_index is not None:  # ensure device index is in range
                assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1)
            if sample_rate is None:  # automatically set the sample rate to the hardware's default sample rate if not specified
                device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info()
                assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info)
                sample_rate = int(device_info["defaultSampleRate"])
        finally:
            audio.terminate()

        self.device_index = device_index
        self.format = self.pyaudio_module.paInt16  # 16-bit int sampling
        self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format)  # size of each sample
        self.SAMPLE_RATE = sample_rate  # sampling rate in Hertz
        self.CHUNK = chunk_size  # number of frames stored in each buffer

        self.audio = None
        self.stream = None

    @staticmethod
    def get_pyaudio():
        """
        Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed
        """
        try:
            import pyaudio
        except ImportError:
            raise AttributeError("Could not find PyAudio; check installation")
        from distutils.version import LooseVersion
        if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.11"):
            raise AttributeError("PyAudio 0.2.11 or later is required (found version {})".format(pyaudio.__version__))
        return pyaudio
    @staticmethod
    def list_microphone_names():
        """
        Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.

        The index of each microphone's name in the returned list is the same as its device index when creating a ``Microphone`` instance - if you want to use the microphone at index 3 in the returned list, use ``Microphone(device_index=3)``.
        """
        audio = Microphone.get_pyaudio().PyAudio()
        try:
            result = []
            for i in range(audio.get_device_count()):
                device_info = audio.get_device_info_by_index(i)
                result.append(device_info.get("name"))
        finally:
            audio.terminate()
        return result
    @staticmethod
    def list_working_microphones():
        """
        Returns a dictionary mapping device indices to microphone names, for microphones that are currently hearing sounds. When using this function, ensure that your microphone is unmuted and make some noise at it to ensure it will be detected as working.

        Each key in the returned dictionary can be passed to the ``Microphone`` constructor to use that microphone. For example, if the return value is ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do ``Microphone(device_index=3)`` to use that microphone.
        """
        pyaudio_module = Microphone.get_pyaudio()
        audio = pyaudio_module.PyAudio()
        try:
            result = {}
            for device_index in range(audio.get_device_count()):
                device_info = audio.get_device_info_by_index(device_index)
                device_name = device_info.get("name")
                assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info)
                try:
                    # read one buffer of audio from the device
                    pyaudio_stream = audio.open(
                        input_device_index=device_index, channels=1, format=pyaudio_module.paInt16,
                        rate=int(device_info["defaultSampleRate"]), input=True
                    )
                    try:
                        buffer = pyaudio_stream.read(1024)
                        if not pyaudio_stream.is_stopped():
                            pyaudio_stream.stop_stream()
                    finally:
                        pyaudio_stream.close()
                except Exception:
                    continue

                # compute RMS of debiased audio (remove the DC offset before measuring energy)
                energy = -audioop.rms(buffer, 2)
                energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF])
                debiased_energy = audioop.rms(audioop.add(buffer, energy_bytes * (len(buffer) // 2), 2), 2)

                if debiased_energy > 30:  # probably actually audio
                    result[device_index] = device_name
        finally:
            audio.terminate()
        return result

    def __enter__(self):
        assert self.stream is None, "This audio source is already inside a context manager"
        self.audio = self.pyaudio_module.PyAudio()
        try:
            self.stream = Microphone.MicrophoneStream(
                self.audio.open(
                    input_device_index=self.device_index, channels=1, format=self.format,
                    rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True,
                )
            )
        except Exception:
            self.audio.terminate()
            raise
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            self.stream.close()
        finally:
            self.stream = None
            self.audio.terminate()

    class MicrophoneStream(object):
        def __init__(self, pyaudio_stream):
            self.pyaudio_stream = pyaudio_stream

        def read(self, size):
            return self.pyaudio_stream.read(size, exception_on_overflow=False)

        def close(self):
            try:
                # sometimes, if the stream isn't stopped, closing the stream throws an exception
                if not self.pyaudio_stream.is_stopped():
                    self.pyaudio_stream.stop_stream()
            finally:
                self.pyaudio_stream.close()
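# A minimal capture sketch; it assumes a working default microphone with PyAudio
# installed. Each read returns one CHUNK of raw little-endian 16-bit PCM:
#
#     with Microphone() as source:
#         chunk = source.stream.read(source.CHUNK)
#         print("%d bytes per buffer" % len(chunk))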
S )	AudioFilea{  
    Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``.

    If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.

    Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.

    WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.

    Both AIFF and AIFF-C (compressed AIFF) formats are supported.

    FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
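    # A minimal file-transcription sketch; it assumes "sample.wav" (a hypothetical
    # path) is a PCM WAV file, and uses the Recognizer class defined below:
    #
    #     r = Recognizer()
    #     with AudioFile("sample.wav") as source:
    #         audio = r.record(source)  # read the entire file into an AudioData instance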
    def __init__(self, filename_or_fileobject):
        assert isinstance(filename_or_fileobject, str) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
        self.filename_or_fileobject = filename_or_fileobject
        self.stream = None
        self.DURATION = None

        self.audio_reader = None
        self.little_endian = False
        self.SAMPLE_RATE = None
        self.CHUNK = None
        self.FRAME_COUNT = None

    def __enter__(self):
        assert self.stream is None, "This audio source is already inside a context manager"
        try:
            # attempt to read the file as WAV
            self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
            self.little_endian = True  # RIFF WAV is a little-endian format (most ``audioop`` operations assume frames are stored in little-endian form)
        except (wave.Error, EOFError):
            try:
                # attempt to read the file as AIFF
                self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
                self.little_endian = False  # AIFF is a big-endian format
            except (aifc.Error, EOFError):
                # attempt to read the file as FLAC
                if hasattr(self.filename_or_fileobject, "read"):
                    flac_data = self.filename_or_fileobject.read()
                else:
                    with open(self.filename_or_fileobject, "rb") as f:
                        flac_data = f.read()

                # run the FLAC converter with the FLAC data to get the AIFF data
                flac_converter = get_flac_converter()
                if os.name == "nt":  # on Windows, specify that the process is to be started without showing a console window
                    startup_info = subprocess.STARTUPINFO()
                    startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                    startup_info.wShowWindow = subprocess.SW_HIDE
                else:
                    startup_info = None  # default startupinfo
                process = subprocess.Popen([
                    flac_converter,
                    "--stdout", "--totally-silent",  # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
                    "--decode", "--force-aiff-format",  # decode the FLAC file into an AIFF file
                    "-",  # the input FLAC file contents will be given in stdin
                ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info)
                aiff_data, _ = process.communicate(flac_data)
                aiff_file = io.BytesIO(aiff_data)
                try:
                    self.audio_reader = aifc.open(aiff_file, "rb")
                except (aifc.Error, EOFError):
                    raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format")
                self.little_endian = False  # AIFF is a big-endian format
        assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
        self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()

        # 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
        samples_24_bit_pretending_to_be_32_bit = False
        if self.SAMPLE_WIDTH == 3:  # 24-bit audio
            try:
                audioop.bias(b"", self.SAMPLE_WIDTH, 0)  # test whether this sample width is supported
            except audioop.error:  # this version of audioop doesn't support 24-bit audio
                samples_24_bit_pretending_to_be_32_bit = True  # while the instance will outwardly appear to be 24-bit, it will actually internally be 32-bit
                self.SAMPLE_WIDTH = 4  # present as a 32-bit stream, converting on the fly when reading

        self.SAMPLE_RATE = self.audio_reader.getframerate()
        self.CHUNK = 4096
        self.FRAME_COUNT = self.audio_reader.getnframes()
        self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
        self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not hasattr(self.filename_or_fileobject, "read"):  # only close the file if it was opened by this class (i.e., it was originally given as a path)
            self.audio_reader.close()
        self.stream = None
        self.DURATION = None

    class AudioFileStream(object):
        def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
            self.audio_reader = audio_reader  # an audio file object (e.g., a `wave.Wave_read` instance)
            self.little_endian = little_endian  # whether the audio data is little-endian
            self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit  # true if the audio is 24-bit but unsupported, so we pretend it is 32-bit

        def read(self, size=-1):
            buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
            if not isinstance(buffer, bytes):
                buffer = b""  # workaround for https://bugs.python.org/issue24608

            sample_width = self.audio_reader.getsampwidth()
            if not self.little_endian:  # big-endian format, convert to little-endian on the fly
                if hasattr(audioop, "byteswap"):  # ``audioop.byteswap`` was only added in Python 3.4
                    buffer = audioop.byteswap(buffer, sample_width)
                else:  # manually reverse the bytes of each sample (slower, but a working fallback)
                    buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))

            # workaround for https://bugs.python.org/issue12866
            if self.samples_24_bit_pretending_to_be_32_bit:  # we need to convert samples from 24-bit to 32-bit before ``audioop`` can process them
                buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width))  # prepend a zero byte to each 24-bit sample to get a 32-bit sample
                sample_width = 4  # the buffer is now 32-bit audio
            if self.audio_reader.getnchannels() != 1:  # stereo audio
                buffer = audioop.tomono(buffer, sample_width, 1, 1)  # convert stereo audio data to mono
            return buffer
class Recognizer(AudioSource):
    def __init__(self):
        """
        Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
        """
        self.energy_threshold = 300  # minimum audio energy to consider for recording
        self.dynamic_energy_threshold = True
        self.dynamic_energy_adjustment_damping = 0.15
        self.dynamic_energy_ratio = 1.5
        self.pause_threshold = 0.8  # seconds of non-speaking audio before a phrase is considered complete
        self.operation_timeout = None  # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout

        self.phrase_threshold = 0.3  # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
        self.non_speaking_duration = 0.5  # seconds of non-speaking audio to keep on both sides of the recording
    def record(self, source, duration=None, offset=None):
        """
        Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.

        If ``duration`` is not specified, then it will record until there is no more audio input.
        """
        assert isinstance(source, AudioSource), "Source must be an audio source"
        assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"

        frames = io.BytesIO()
        seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
        elapsed_time = 0
        offset_time = 0
        offset_reached = False
        while True:  # loop for the total number of chunks needed
            if offset and not offset_reached:
                offset_time += seconds_per_buffer
                if offset_time > offset:
                    offset_reached = True

            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0:
                break

            if offset_reached or not offset:
                elapsed_time += seconds_per_buffer
                if duration and elapsed_time > duration:
                    break

                frames.write(buffer)

        frame_data = frames.getvalue()
        frames.close()
        return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
    def adjust_for_ambient_noise(self, source, duration=1):
        """
        Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.

        Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.

        The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
        """
        assert isinstance(source, AudioSource), "Source must be an audio source"
        assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
        assert self.pause_threshold >= self.non_speaking_duration >= 0

        seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
        elapsed_time = 0

        # adjust energy threshold until a phrase starts
        while True:
            elapsed_time += seconds_per_buffer
            if elapsed_time > duration:
                break
            buffer = source.stream.read(source.CHUNK)
            energy = audioop.rms(buffer, source.SAMPLE_WIDTH)  # energy of the audio signal

            # dynamically adjust the energy threshold using asymmetric weighted average
            damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer  # account for different chunk sizes and rates
            target_energy = energy * self.dynamic_energy_ratio
            self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)

    def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None):
        # load snowboy library (NOT THREAD SAFE)
        sys.path.append(snowboy_location)
        import snowboydetect
        sys.path.pop()

        detector = snowboydetect.SnowboyDetect(
            resource_filename=os.path.join(snowboy_location, "resources", "common.res").encode(),
            model_str=",".join(snowboy_hot_word_files).encode()
        )
        detector.SetAudioGain(1.0)
        detector.SetSensitivity(",".join(["0.4"] * len(snowboy_hot_word_files)).encode())
        snowboy_sample_rate = detector.SampleRate()

        elapsed_time = 0
        seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
        resampling_state = None

        # buffers capable of holding 5 seconds of original audio
        five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer))
        # buffers capable of holding 0.5 seconds of resampled audio
        half_second_buffer_count = int(math.ceil(0.5 / seconds_per_buffer))
        frames = collections.deque(maxlen=five_seconds_buffer_count)
        resampled_frames = collections.deque(maxlen=half_second_buffer_count)
        # snowboy check interval
        check_interval = 0.05
        last_check = time.time()
        while True:
            elapsed_time += seconds_per_buffer
            if timeout and elapsed_time > timeout:
                raise WaitTimeoutError("listening timed out while waiting for hotword to be said")

            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0:
                break  # reached end of the stream
            frames.append(buffer)

            # resample audio to the sample rate required by Snowboy
            resampled_buffer, resampling_state = audioop.ratecv(buffer, source.SAMPLE_WIDTH, 1, source.SAMPLE_RATE, snowboy_sample_rate, resampling_state)
            resampled_frames.append(resampled_buffer)
            if time.time() - last_check > check_interval:
                # run Snowboy on the resampled audio
                snowboy_result = detector.RunDetection(b"".join(resampled_frames))
                assert snowboy_result != -1, "Error initializing streams or reading audio data"
                if snowboy_result > 0:
                    break  # wake word found
                resampled_frames.clear()
                last_check = time.time()

        return b"".join(frames), elapsed_time
    def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None):
        """
        Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.

        This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.

        The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout.

        The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there will be no phrase time limit.

        The ``snowboy_configuration`` parameter allows integration with `Snowboy <https://snowboy.kitt.ai/>`__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format).

        This operation will always complete within ``timeout + phrase_timeout`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception.
        """
        assert isinstance(source, AudioSource), "Source must be an audio source"
        assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
        assert self.pause_threshold >= self.non_speaking_duration >= 0
        if snowboy_configuration is not None:
            assert os.path.isfile(os.path.join(snowboy_configuration[0], "snowboydetect.py")), "``snowboy_configuration[0]`` must be a Snowboy root directory containing ``snowboydetect.py``"
            for hot_word_file in snowboy_configuration[1]:
                assert os.path.isfile(hot_word_file), "``snowboy_configuration[1]`` must be a list of Snowboy hot word configuration files"

        seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
        pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer))  # number of buffers of non-speaking audio during a phrase before the phrase should be considered complete
        phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer))  # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
        non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer))  # maximum number of buffers of non-speaking audio to retain before and after a phrase

        # read audio input for phrases until there is a phrase that is long enough
        elapsed_time = 0  # number of seconds of audio read
        buffer = b""  # an empty buffer means that the stream has ended and there is no data left to read
        while True:
            frames = collections.deque()

            if snowboy_configuration is None:
                # store audio input until the phrase starts
                while True:
                    # handle waiting too long for phrase by raising an exception
                    elapsed_time += seconds_per_buffer
                    if timeout and elapsed_time > timeout:
                        raise WaitTimeoutError("listening timed out while waiting for phrase to start")

                    buffer = source.stream.read(source.CHUNK)
                    if len(buffer) == 0:
                        break  # reached end of the stream
                    frames.append(buffer)
                    if len(frames) > non_speaking_buffer_count:  # ensure we only keep the needed amount of non-speaking buffers
                        frames.popleft()

                    # detect whether speaking has started on audio input
                    energy = audioop.rms(buffer, source.SAMPLE_WIDTH)  # energy of the audio signal
                    if energy > self.energy_threshold:
                        break

                    # dynamically adjust the energy threshold using asymmetric weighted average
                    if self.dynamic_energy_threshold:
                        damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer  # account for different chunk sizes and rates
                        target_energy = energy * self.dynamic_energy_ratio
                        self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
            else:
                # read audio input until the hotword is said
                snowboy_location, snowboy_hot_word_files = snowboy_configuration
                buffer, delta_time = self.snowboy_wait_for_hot_word(snowboy_location, snowboy_hot_word_files, source, timeout)
                elapsed_time += delta_time
                if len(buffer) == 0:
                    break  # reached end of the stream
                frames.append(buffer)

            # read audio input until the phrase ends
            pause_count, phrase_count = 0, 0
            phrase_start_time = elapsed_time
            while True:
                # handle phrase being too long by cutting off the audio
                elapsed_time += seconds_per_buffer
                if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
                    break

                buffer = source.stream.read(source.CHUNK)
                if len(buffer) == 0:
                    break  # reached end of the stream
                frames.append(buffer)
                phrase_count += 1

                # check if speaking has stopped for longer than the pause threshold on the audio input
                energy = audioop.rms(buffer, source.SAMPLE_WIDTH)  # unit energy of the audio signal within the buffer
                if energy > self.energy_threshold:
                    pause_count = 0
                else:
                    pause_count += 1
                if pause_count > pause_buffer_count:  # end of the phrase
                    break

            # check how long the detected phrase is, and retry listening if the phrase is too short
            phrase_count -= pause_count  # exclude the buffers for the pause before the phrase
            if phrase_count >= phrase_buffer_count or len(buffer) == 0:
                break  # phrase is long enough or we've reached the end of the stream, so stop listening

        # obtain frame data
        for i in range(pause_count - non_speaking_buffer_count):
            frames.pop()  # remove extra non-speaking frames at the end
        frame_data = b"".join(frames)

        return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
    def listen_in_background(self, source, callback, phrase_time_limit=None):
        """
        Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.

        Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from.

        Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well.

        The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread.
        """
        assert isinstance(source, AudioSource), "Source must be an audio source"
        running = [True]

        def threaded_listen():
            with source as s:
                while running[0]:
                    try:  # listen for 1 second, then check again if the stop function has been called
                        audio = self.listen(s, 1, phrase_time_limit)
                    except WaitTimeoutError:  # listening timed out, just try again
                        pass
                    else:
                        if running[0]:
                            callback(self, audio)

        def stopper(wait_for_stop=True):
            running[0] = False
            if wait_for_stop:
                listener_thread.join()  # block until the background thread is done, which can take around 1 second

        listener_thread = threading.Thread(target=threaded_listen)
        listener_thread.daemon = True
        listener_thread.start()
        return stopper
    def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, grammar=None, show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.

        The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models.

        If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for.

        Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored.

        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert isinstance(language, str) or (isinstance(language, tuple) and len(language) == 3), "``language`` must be a string or 3-tuple of Sphinx data file paths of the form ``(acoustic_parameters, language_model, phoneme_dictionary)``"
        assert keyword_entries is None or all(isinstance(keyword, str) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1"

        # import the PocketSphinx speech recognition module
        try:
            from pocketsphinx import FsgModel, Jsgf, pocketsphinx
        except ImportError:
            raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
        except ValueError:
            raise RequestError("bad PocketSphinx installation; try reinstalling PocketSphinx version 0.0.9 or better.")
        if not hasattr(pocketsphinx, "Decoder") or not hasattr(pocketsphinx.Decoder, "default_config"):
            raise RequestError("outdated PocketSphinx installation; ensure you have PocketSphinx version 0.0.9 or better.")

        if isinstance(language, str):  # directory containing language data
            language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
            if not os.path.isdir(language_directory):
                raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory))
            acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
            language_model_file = os.path.join(language_directory, "language-model.lm.bin")
            phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
        else:  # 3-tuple of Sphinx data file paths
            acoustic_parameters_directory, language_model_file, phoneme_dictionary_file = language
        if not os.path.isdir(acoustic_parameters_directory):
            raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory))
        if not os.path.isfile(language_model_file):
            raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file))
        if not os.path.isfile(phoneme_dictionary_file):
            raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file))

        # create decoder object
        config = pocketsphinx.Decoder.default_config()
        config.set_string("-hmm", acoustic_parameters_directory)  # set the path of the hidden Markov model (HMM) parameter files
        config.set_string("-lm", language_model_file)
        config.set_string("-dict", phoneme_dictionary_file)
        config.set_string("-logfn", os.devnull)  # disable logging (logging causes unwanted output in terminal)
        decoder = pocketsphinx.Decoder(config)

        # obtain audio data
        raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2)  # the included language models require audio to be 16-bit mono 16 kHz in little-endian format

        # obtain recognition results
        if keyword_entries is not None:  # explicitly specified set of keywords
            with PortableNamedTemporaryFile("w") as f:
                # generate a keywords file - Sphinx documentation recommends sensitivities between 1e-50 and 1e-5
                f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in keyword_entries)
                f.flush()

                # perform the speech recognition with the keywords file (this is inside the context manager so the file isn't deleted until we're done)
                decoder.set_kws("keywords", f.name)
                decoder.set_search("keywords")
        elif grammar is not None:  # a path to a FSG or JSGF grammar
            if not os.path.exists(grammar):
                raise ValueError("Grammar '{0}' does not exist.".format(grammar))
            grammar_path = os.path.abspath(os.path.dirname(grammar))
            grammar_name = os.path.splitext(os.path.basename(grammar))[0]
            fsg_path = "{0}/{1}.fsg".format(grammar_path, grammar_name)
            if not os.path.exists(fsg_path):  # create FSG grammar if not available
                jsgf = Jsgf(grammar)
                rule = jsgf.get_rule("{0}.{0}".format(grammar_name))
                fsg = FsgModel.build_fsg(rule, decoder.get_logmath(), 7.5)
                fsg.writefile(fsg_path)
            else:
                fsg = FsgModel(fsg_path, decoder.get_logmath(), 7.5)
            decoder.set_fsg(grammar_name, fsg)
            decoder.set_search(grammar_name)

        decoder.start_utt()  # begin utterance processing
        decoder.process_raw(raw_data, False, True)  # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
        decoder.end_utt()  # stop utterance processing

        if show_all:
            return decoder

        # return results
        hypothesis = decoder.hyp()
        if hypothesis is not None:
            return hypothesis.hypstr
        raise UnknownValueError()  # no transcriptions available
    def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API.

        This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart <https://cloud.google.com/speech/docs/getting-started>`__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file <https://developers.google.com/identity/protocols/application-default-credentials>`__.

        The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation <https://cloud.google.com/speech/docs/languages>`__.

        If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings <https://cloud.google.com/speech/limits#content>`__.

        Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        if credentials_json is None:
            assert os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") is not None
        assert isinstance(language, str), "``language`` must be a string"
        assert preferred_phrases is None or all(isinstance(preferred_phrase, str) for preferred_phrase in preferred_phrases), "``preferred_phrases`` must be a list of strings"

        try:
            import socket

            from google.api_core.exceptions import GoogleAPICallError
            from google.cloud import speech
        except ImportError:
            raise RequestError("missing google-cloud-speech module: ensure that google-cloud-speech is set up correctly.")

        if credentials_json is not None:
            client = speech.SpeechClient.from_service_account_json(credentials_json)
        else:
            client = speech.SpeechClient()

        flac_data = audio_data.get_flac_data(
            convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)),  # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range
            convert_width=2  # audio samples must be 16-bit
        )
        audio = speech.RecognitionAudio(content=flac_data)

        config = {
            "encoding": speech.RecognitionConfig.AudioEncoding.FLAC,
            "sample_rate_hertz": audio_data.sample_rate,
            "language_code": language,
        }
        if preferred_phrases is not None:
            config["speechContexts"] = [speech.SpeechContext(phrases=preferred_phrases)]
        if show_all:
            config["enableWordTimeOffsets"] = True  # some useful extra options for when we want all the output

        opts = {}
        if self.operation_timeout and socket.getdefaulttimeout() is None:
            opts["timeout"] = self.operation_timeout

        config = speech.RecognitionConfig(**config)

        try:
            response = client.recognize(config=config, audio=audio, **opts)
        except GoogleAPICallError as e:
            raise RequestError(e)
        except URLError as e:
            raise RequestError("recognition connection failed: {0}".format(e.reason))

        if show_all:
            return response
        if len(response.results) == 0:
            raise UnknownValueError()

        transcript = ""
        for result in response.results:
            transcript += result.alternatives[0].transcript.strip() + " "
        return transcript
    def recognize_wit(self, audio_data, key, show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.

        The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.

        To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.

        The recognition language is configured in the Wit.ai app settings.

        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        assert isinstance(audio_data, AudioData), "Data must be audio data"
        assert isinstance(key, str), "``key`` must be a string"

        wav_data = audio_data.get_wav_data(
            convert_rate=None if audio_data.sample_rate >= 8000 else 8000,  # audio samples must be at least 8 kHz
            convert_width=2  # audio samples should be 16-bit
        )
        url = "https://api.wit.ai/speech?v=20170307"
        request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"})
        try:
            response = urlopen(request, timeout=self.operation_timeout)
        except HTTPError as e:
            raise RequestError("recognition request failed: {}".format(e.reason))
        except URLError as e:
            raise RequestError("recognition connection failed: {}".format(e.reason))
        response_text = response.read().decode("utf-8")
        result = json.loads(response_text)

        # return results
        if show_all:
            return result
        if "_text" not in result or result["_text"] is None:
            raise UnknownValueError()
        return result["_text"]
    def recognize_azure(self, audio_data, key, language="en-US", profanity="masked", location="westus", show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Azure Speech API.

        The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://azure.microsoft.com/en-ca/pricing/details/cognitive-services/speech-api/>`__ with Microsoft Azure.

        To get the API key, go to the `Microsoft Azure Portal Resources <https://portal.azure.com/>`__ page, go to "All Resources" > "Add" > "See All" > Search "Speech > "Create", and fill in the form to make a "Speech" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Azure Speech API keys are 32-character lowercase hexadecimal strings.

        The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#recognition-language>`__ under "Interactive and dictation mode".

        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#sample-responses>`__ as a JSON dictionary.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert isinstance(key, str), "``key`` must be a string"
        assert isinstance(language, str), "``language`` must be a string"

        result_format = "detailed"
        access_token, expire_time = getattr(self, "azure_cached_access_token", None), getattr(self, "azure_cached_access_token_expiry", None)
        allow_caching = True
        try:
            from time import monotonic  # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
        except ImportError:
            expire_time = None  # monotonic time not available, don't cache access tokens
            allow_caching = False  # don't allow caching, since monotonic time isn't available
        if expire_time is None or monotonic() > expire_time:  # caching not enabled, first credential request, or the access token from the previous one expired
            # get an access token using OAuth
            credential_url = "https://" + location + ".api.cognitive.microsoft.com/sts/v1.0/issueToken"
            credential_request = Request(credential_url, data=b"", headers={
                "Content-type": "application/x-www-form-urlencoded",
                "Content-Length": "0",
                "Ocp-Apim-Subscription-Key": key,
            })

            if allow_caching:
                start_time = monotonic()

            try:
                credential_response = urlopen(credential_request, timeout=60)  # credential response can take longer, use longer timeout instead of default one
            except HTTPError as e:
                raise RequestError("credential request failed: {}".format(e.reason))
            except URLError as e:
                raise RequestError("credential connection failed: {}".format(e.reason))
            access_token = credential_response.read().decode("utf-8")

            if allow_caching:
                # save the token for the duration it is valid for
                self.azure_cached_access_token = access_token
                self.azure_cached_access_token_expiry = start_time + 600  # the token expires in exactly 10 minutes

        wav_data = audio_data.get_wav_data(
            convert_rate=16000,  # audio samples must be 8 kHz or 16 kHz
            convert_width=2  # audio samples should be 16-bit
        )

        url = "https://" + location + ".stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?{}".format(urlencode({
            "language": language,
            "format": result_format,
            "profanity": profanity,
        }))

        if sys.version_info >= (3, 6):  # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible
            request = Request(url, data=io.BytesIO(wav_data), headers={
                "Authorization": "Bearer {}".format(access_token),
                "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
                "Transfer-Encoding": "chunked",
            })
        else:  # fall back on manually formatting the POST body as a chunked request
            ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8")
            chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n"
            request = Request(url, data=chunked_transfer_encoding_data, headers={
                "Authorization": "Bearer {}".format(access_token),
                "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
                "Transfer-Encoding": "chunked",
            })

        try:
            response = urlopen(request, timeout=self.operation_timeout)
        except HTTPError as e:
            raise RequestError("recognition request failed: {}".format(e.reason))
        except URLError as e:
            raise RequestError("recognition connection failed: {}".format(e.reason))
        response_text = response.read().decode("utf-8")
        result = json.loads(response_text)

        # return results
        if show_all:
            return result
        if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "NBest" not in result:
            raise UnknownValueError()
        return result["NBest"][0]["Display"], result["NBest"][0]["Confidence"]
    def recognize_bing(self, audio_data, key, language="en-US", show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API.

        The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://azure.microsoft.com/en-ca/pricing/details/cognitive-services/speech-api/>`__ with Microsoft Azure.

        To get the API key, go to the `Microsoft Azure Portal Resources <https://portal.azure.com/>`__ page, go to "All Resources" > "Add" > "See All" > Search "Bing Speech API > "Create", and fill in the form to make a "Bing Speech API" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings.

        The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#recognition-language>`__ under "Interactive and dictation mode".

        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition#sample-responses>`__ as a JSON dictionary.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert isinstance(key, str), "``key`` must be a string"
        assert isinstance(language, str), "``language`` must be a string"

        access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
        allow_caching = True
        try:
            from time import monotonic  # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
        except ImportError:
            expire_time = None  # monotonic time not available, don't cache access tokens
            allow_caching = False  # don't allow caching, since monotonic time isn't available
        if expire_time is None or monotonic() > expire_time:  # caching not enabled, first credential request, or the access token from the previous one expired
            # get an access token using OAuth
            credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
            credential_request = Request(credential_url, data=b"", headers={
                "Content-type": "application/x-www-form-urlencoded",
                "Content-Length": "0",
                "Ocp-Apim-Subscription-Key": key,
            })

            if allow_caching:
                start_time = monotonic()

            try:
                credential_response = urlopen(credential_request, timeout=60)  # credential response can take longer, use longer timeout instead of default one
            except HTTPError as e:
                raise RequestError("credential request failed: {}".format(e.reason))
            except URLError as e:
                raise RequestError("credential connection failed: {}".format(e.reason))
            access_token = credential_response.read().decode("utf-8")

            if allow_caching:
                # save the token for the duration it is valid for
                self.bing_cached_access_token = access_token
                self.bing_cached_access_token_expiry = start_time + 600  # the token expires in exactly 10 minutes

        wav_data = audio_data.get_wav_data(
            convert_rate=16000,  # audio samples must be 8 kHz or 16 kHz
            convert_width=2  # audio samples should be 16-bit
        )

        url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format(urlencode({
            "language": language,
            "locale": language,
            "requestid": uuid.uuid4(),
        }))

        if sys.version_info >= (3, 6):  # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible
            request = Request(url, data=io.BytesIO(wav_data), headers={
                "Authorization": "Bearer {}".format(access_token),
                "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
                "Transfer-Encoding": "chunked",
            })
        else:  # fall back on manually formatting the POST body as a chunked request
            ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8")
            chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n"
            request = Request(url, data=chunked_transfer_encoding_data, headers={
                "Authorization": "Bearer {}".format(access_token),
                "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000",
                "Transfer-Encoding": "chunked",
            })

        try:
            response = urlopen(request, timeout=self.operation_timeout)
        except HTTPError as e:
            raise RequestError("recognition request failed: {}".format(e.reason))
        except URLError as e:
            raise RequestError("recognition connection failed: {}".format(e.reason))
        response_text = response.read().decode("utf-8")
        result = json.loads(response_text)

        # return results
        if show_all:
            return result
        if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "DisplayText" not in result:
            raise UnknownValueError()
        return result["DisplayText"]
    def recognize_lex(self, audio_data, bot_name, bot_alias, user_id, content_type="audio/l16; rate=16000; channels=1", access_key_id=None, secret_access_key=None, region=None):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Amazon Lex API.

        If access_key_id or secret_access_key is not set it will go through the list in the link below
        http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
        """
        assert isinstance(audio_data, AudioData), "Data must be audio data"
        assert isinstance(bot_name, str), "``bot_name`` must be a string"
        assert isinstance(bot_alias, str), "``bot_alias`` must be a string"
        assert isinstance(user_id, str), "``user_id`` must be a string"
        assert isinstance(content_type, str), "``content_type`` must be a string"
        assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string"
        assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string"
        assert region is None or isinstance(region, str), "``region`` must be a string"

        try:
            import boto3
        except ImportError:
            raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.")

        client = boto3.client("lex-runtime", aws_access_key_id=access_key_id,
                              aws_secret_access_key=secret_access_key,
                              region_name=region)

        raw_data = audio_data.get_raw_data(
            convert_rate=16000, convert_width=2
        )

        accept = "text/plain; charset=utf-8"
        response = client.post_content(botName=bot_name, botAlias=bot_alias, userId=user_id, contentType=content_type, accept=accept, inputStream=raw_data)

        return response["inputTranscript"]
    def recognize_houndify(self, audio_data, client_id, client_key, show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API.

        The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account <https://www.houndify.com/signup>`__. Once logged into the `dashboard <https://www.houndify.com/dashboard>`__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue".

        To get the client ID and client key for a Houndify client, go to the `dashboard <https://www.houndify.com/dashboard>`__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings.

        Currently, only English is supported as a recognition language.

        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        assert isinstance(audio_data, AudioData), "Data must be audio data"
        assert isinstance(client_id, str), "``client_id`` must be a string"
        assert isinstance(client_key, str), "``client_key`` must be a string"

        wav_data = audio_data.get_wav_data(
            convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000,  # audio samples must be 8 kHz or 16 kHz
            convert_width=2  # audio samples should be 16-bit
        )
        url = "https://api.houndify.com/v1/audio"
        user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4())
        request_time = str(int(time.time()))
        request_signature = base64.urlsafe_b64encode(
            hmac.new(
                base64.urlsafe_b64decode(client_key),
                user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"),
                hashlib.sha256
            ).digest()  # get the HMAC digest as bytes
        ).decode("utf-8")

        request = Request(url, data=wav_data, headers={
            "Content-Type": "application/json",
            "Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}),
            "Hound-Request-Authentication": "{};{}".format(user_id, request_id),
            "Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature),
        })
        try:
            response = urlopen(request, timeout=self.operation_timeout)
        except HTTPError as e:
            raise RequestError("recognition request failed: {}".format(e.reason))
        except URLError as e:
            raise RequestError("recognition connection failed: {}".format(e.reason))
        response_text = response.read().decode("utf-8")
        result = json.loads(response_text)

        # return results
        if show_all:
            return result
        return (result["Disambiguation"]["ChoiceData"][0]["Transcription"], result["Disambiguation"]["ChoiceData"][0]["ConfidenceScore"])
    def recognize_amazon(self, audio_data, bucket_name=None, access_key_id=None, secret_access_key=None, region=None, job_name=None, file_key=None):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using Amazon Transcribe.
        https://aws.amazon.com/transcribe/
        If access_key_id or secret_access_key is not set it will go through the list in the link below
        http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
        """
        assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string"
        assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string"
        assert region is None or isinstance(region, str), "``region`` must be a string"
        import multiprocessing
        import traceback
        import uuid

        from botocore.exceptions import ClientError
        proc = multiprocessing.current_process()

        check_existing = audio_data is None and job_name

        bucket_name = bucket_name or ("%s-%s" % (str(uuid.uuid4()), proc.pid))
        job_name = job_name or ("%s-%s" % (str(uuid.uuid4()), proc.pid))

        try:
            import boto3
        except ImportError:
            raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.")

        transcribe = boto3.client("transcribe", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, region_name=region)
        s3 = boto3.client("s3", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, region_name=region)
        session = boto3.Session(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, region_name=region)

        filename = "%s.wav" % job_name

        # bucket creation fails surprisingly often, even if the bucket exists
        try:
            s3.create_bucket(Bucket=bucket_name)
        except ClientError as exc:
            print("Error creating bucket %s: %s" % (bucket_name, exc))
        s3res = session.resource("s3")

        if audio_data is not None:
            print("Uploading audio data...")
            wav_data = audio_data.get_wav_data()
            s3.put_object(Bucket=bucket_name, Key=filename, Body=wav_data)
            object_acl = s3res.ObjectAcl(bucket_name, filename)
            object_acl.put(ACL="public-read")
        else:
            print("Skipping audio upload.")
        job_uri = "https://%s.s3.amazonaws.com/%s" % (bucket_name, filename)

        if check_existing:
            # wait for the existing job to complete
            try:
                status = transcribe.get_transcription_job(TranscriptionJobName=job_name)
            except ClientError as exc:
                print("!" * 80)
                print("Error getting job:", exc.response)
                if exc.response["Error"]["Code"] == "BadRequestException" and "The requested job couldn't be found" in str(exc):
                    # the job we recorded no longer exists on AWS, likely because it was deleted after an interruption
                    exc = TranscriptionNotReady()
                    exc.job_name = None
                    exc.file_key = None
                    raise exc
                else:
                    # some other error happened, so re-raise
                    raise

            job = status["TranscriptionJob"]
            if job["TranscriptionJobStatus"] in ["COMPLETED"] and "TranscriptFileUri" in job["Transcript"]:
                # retrieve the transcription JSON containing the transcript
                transcript_uri = job["Transcript"]["TranscriptFileUri"]
                import json
                import urllib.request
                with urllib.request.urlopen(transcript_uri) as json_data:
                    d = json.load(json_data)
                    confidences = []
                    for item in d["results"]["items"]:
                        confidences.append(float(item["alternatives"][0]["confidence"]))
                    confidence = 0.5
                    if confidences:
                        confidence = sum(confidences) / float(len(confidences))
                    transcript = d["results"]["transcripts"][0]["transcript"]

                    # delete the job, then the S3 file
                    try:
                        transcribe.delete_transcription_job(TranscriptionJobName=job_name)  # cleanup
                    except Exception as exc:
                        print("Warning, could not clean up transcription: %s" % exc)
                        traceback.print_exc()
                    s3.delete_object(Bucket=bucket_name, Key=filename)

                    return transcript, confidence
            elif job["TranscriptionJobStatus"] in ["FAILED"]:
                # delete the job, then the S3 file
                try:
                    transcribe.delete_transcription_job(TranscriptionJobName=job_name)  # cleanup
                except Exception as exc:
                    print("Warning, could not clean up transcription: %s" % exc)
                    traceback.print_exc()
                s3.delete_object(Bucket=bucket_name, Key=filename)

                exc = TranscriptionFailed()
                exc.job_name = None
                exc.file_key = None
                raise exc
            else:
                # still in progress
                print("Keep waiting.")
                exc = TranscriptionNotReady()
                exc.job_name = job_name
                exc.file_key = None
                raise exc
        else:
            # launch the transcription job
            try:
                transcribe.start_transcription_job(
                    TranscriptionJobName=job_name,
                    Media={"MediaFileUri": job_uri},
                    MediaFormat="wav",
                    LanguageCode="en-US"
                )
                exc = TranscriptionNotReady()
                exc.job_name = job_name
                exc.file_key = None
                raise exc
            except ClientError as exc:
                print("!" * 80)
                print("Error starting job:", exc.response)
                if exc.response["Error"]["Code"] == "LimitExceededException":
                    # wait for the next poll to retry
                    exc = TranscriptionNotReady()
                    exc.job_name = None
                    exc.file_key = None
                    raise exc
                else:
                    # some other error happened, so re-raise
                    raise
    def recognize_assemblyai(self, audio_data, api_token, job_name=None, **kwargs):
        """
        Wraps the AssemblyAI STT service.
        https://www.assemblyai.com/
        """
        def read_file(filename, chunk_size=5242880):
            # stream a local file in 5 MB chunks, as recommended by the AssemblyAI upload docs
            with open(filename, "rb") as _file:
                while True:
                    data = _file.read(chunk_size)
                    if not data:
                        break
                    yield data

        check_existing = audio_data is None and job_name
        if check_existing:
            # query the status of an existing transcription job
            transciption_id = job_name
            endpoint = "https://api.assemblyai.com/v2/transcript/{}".format(transciption_id)
            headers = {"authorization": api_token}
            response = requests.get(endpoint, headers=headers)
            data = response.json()
            status = data["status"]

            if status == "error":
                # handle error
                exc = TranscriptionFailed()
                exc.job_name = None
                exc.file_key = None
                raise exc
            elif status == "completed":
                # handle success
                confidence = data["confidence"]
                text = data["text"]
                return text, confidence

            # otherwise, keep waiting
            print("Keep waiting.")
            exc = TranscriptionNotReady()
            exc.job_name = job_name
            exc.file_key = None
            raise exc
        else:
            # upload the audio, then queue it for transcription
            headers = {"authorization": api_token}
            response = requests.post("https://api.assemblyai.com/v2/upload", headers=headers, data=audio_data.get_raw_data())
            upload_url = response.json()["upload_url"]

            endpoint = "https://api.assemblyai.com/v2/transcript"
            payload = {"audio_url": upload_url}
            headers = {"authorization": api_token, "content-type": "application/json"}
            response = requests.post(endpoint, json=payload, headers=headers)
            data = response.json()
            transciption_id = data["id"]
            exc = TranscriptionNotReady()
            exc.job_name = transciption_id
            exc.file_key = None
            raise exc
    def recognize_ibm(self, audio_data, key, language="en-US", show_all=False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.

        The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <https://www.ibm.com/watson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings.

        The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.

        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__ as a JSON dictionary.

        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert isinstance(key, str), "``key`` must be a string"

        flac_data = audio_data.get_flac_data(
            convert_rate=None if audio_data.sample_rate >= 16000 else 16000,  # audio samples should be at least 16 kHz
            convert_width=None if audio_data.sample_width >= 2 else 2  # audio samples should be at least 16-bit
        )
        url = "https://gateway-wdc.watsonplatform.net/speech-to-text/api/v1/recognize"
        request = Request(url, data=flac_data, headers={
            "Content-Type": "audio/x-flac",
        })
        request.get_method = lambda: "POST"
        username = "apikey"
        password = key
        authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8")
        request.add_header("Authorization", "Basic {}".format(authorization_value))
        try:
            response = urlopen(request, timeout=self.operation_timeout)
        except HTTPError as e:
            raise RequestError("recognition request failed: {}".format(e.reason))
        except URLError as e:
            raise RequestError("recognition connection failed: {}".format(e.reason))
        response_text = response.read().decode("utf-8")
        result = json.loads(response_text)

        # return results
        if show_all:
            return result
        if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]:
            raise UnknownValueError()

        transcription = []
        confidence = None
        for utterance in result["results"]:
            if "alternatives" not in utterance:
                raise UnknownValueError()
            for hypothesis in utterance["alternatives"]:
                if "transcript" in hypothesis:
                    transcription.append(hypothesis["transcript"])
                    confidence = hypothesis["confidence"]
                    break
        return "\n".join(transcription), confidence
    lasttfgraph = ""
    tflabels = None

    def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_actions_frozen.pb', tensor_label='tensorflow-data/conv_actions_labels.txt'):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance).

        Path to Tensor loaded from ``tensor_graph``. You can download a model here: http://download.tensorflow.org/models/speech_commands_v0.01.zip

        Path to Tensor Labels file loaded from ``tensor_label``.
        """
        assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert isinstance(tensor_graph, str), "``tensor_graph`` must be a string"
        assert isinstance(tensor_label, str), "``tensor_label`` must be a string"

        try:
            import tensorflow as tf
        except ImportError:
            raise RequestError("missing tensorflow module: ensure that tensorflow is set up correctly.")

        if not (tensor_graph == self.lasttfgraph):
            self.lasttfgraph = tensor_graph

            # load the frozen graph
            with tf.gfile.FastGFile(tensor_graph, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name="")
            # load the labels
            self.tflabels = [line.rstrip() for line in tf.gfile.GFile(tensor_label)]

        wav_data = audio_data.get_wav_data(
            convert_rate=16000, convert_width=2
        )

        with tf.Session() as sess:
            input_layer_name = "wav_data:0"
            output_layer_name = "labels_softmax:0"
            softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
            predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})

            # sort labels in order of confidence and return the most likely one
            top_k = predictions.argsort()[-1:][::-1]
            for node_id in top_k:
                human_string = self.tflabels[node_id]
                return human_string
    def recognize_whisper(self, audio_data, model="base", show_dict=False, load_options=None, language=None, translate=False, **transcribe_options):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper.

        The recognition language is determined by ``language``, an uncapitalized full language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py

        model can be any of tiny, base, small, medium, large, tiny.en, base.en, small.en, medium.en. See https://github.com/openai/whisper for more details.

        If show_dict is true, returns the full dict response from Whisper, including the detected language. Otherwise returns only the transcription.

        You can translate the result to english with Whisper by passing translate=True

        Other values are passed directly to whisper. See https://github.com/openai/whisper/blob/main/whisper/transcribe.py for all options
        """
        assert isinstance(audio_data, AudioData), "Data must be audio data"
        import numpy as np
        import soundfile as sf
        import torch
        import whisper

        if load_options or not hasattr(self, "whisper_model") or self.whisper_model.get(model) is None:
            self.whisper_model = getattr(self, "whisper_model", {})
            self.whisper_model[model] = whisper.load_model(model, **load_options or {})

        # Whisper expects 16 kHz mono audio
        wav_bytes = audio_data.get_wav_data(convert_rate=16000)
        wav_stream = io.BytesIO(wav_bytes)
        audio_array, sampling_rate = sf.read(wav_stream)
        audio_array = audio_array.astype(np.float32)

        result = self.whisper_model[model].transcribe(
            audio_array,
            language=language,
            task="translate" if translate else None,
            fp16=torch.cuda.is_available(),
            **transcribe_options
        )

        if show_dict:
            return result
        else:
            return result["text"]

    def recognize_vosk(self, audio_data, language="en"):
        from vosk import KaldiRecognizer, Model

        assert isinstance(audio_data, AudioData), "Data must be audio data"

        if not hasattr(self, "vosk_model"):
            if not os.path.exists("model"):
                return "Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder."
            self.vosk_model = Model("model")

        rec = KaldiRecognizer(self.vosk_model, 16000)
        rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2))
        finalRecognition = rec.FinalResult()

        return finalRecognition
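# A minimal end-to-end sketch; it assumes PocketSphinx and its bundled en-US model
# are installed, so recognition runs fully offline ("sample.wav" is a hypothetical path):
#
#     r = Recognizer()
#     with AudioFile("sample.wav") as source:
#         audio = r.record(source)
#     try:
#         print(r.recognize_sphinx(audio))
#     except UnknownValueError:
#         print("speech was unintelligible")
#     except RequestError as e:
#         print("Sphinx error; {0}".format(e))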
 Zdd Zdd Z	dS )r   zLimited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows.w+bc                 C  s
   || _ d S ra   )mode)r   r  r   r   r   r     s    z#PortableNamedTemporaryFile.__init__c                 C  s&   t  \}}t|| j| _|| _| S ra   )tempfilemkstempr}   fdopenr  r  rB   )r   file_descriptor	file_pathr   r   r   r     s    z$PortableNamedTemporaryFile.__enter__c                 C  s   | j   t| j d S ra   )r  rQ   r}   removerB   r   r   r   r   r     s    
z#PortableNamedTemporaryFile.__exit__c                 O  s   | j j||S ra   )r  r   r   argsr  r   r   r   r     s    z PortableNamedTemporaryFile.writec                 O  s   | j j||S ra   )r  r   r  r   r   r   r     s    z%PortableNamedTemporaryFile.writelinesc                 O  s   | j j||S ra   )r  r   r  r   r   r   r     s    z PortableNamedTemporaryFile.flushN)r  )
r   r    r!   rd   r   r   r   r   r   r   r   r   r   r   r     s   
	r   )googler  r  Fc              
   C  s  |j ddd}d}t j}|d|krq.q|d kr@t j}d|d d d d	 d d
 |d d |d d d |d d d d d | d d |d d }	t||	d|tt|	dd|dd}
zt	|
dd}W nf t
k
r& } ztd|jW 5 d }~X Y n4 tk
rX } ztd|jW 5 d }~X Y nX | d}t|}|r||S d|ksd|d ks|d d dkrt |d d S )Nr   rL   r   zhttps://api.api.ai/v1/queryr%  s   --rG  s0   Content-Disposition: form-data; name="request"
s    Content-Type: application/json
s    {"v": "20150910", "sessionId": "s   ", "lang": "s   "}
sH   Content-Disposition: form-data; name="voiceData"; filename="audio.wav"
s   Content-Type: audio/wav
s   --
r  z100-continuez multipart/form-data; boundary={})r  r;  ZExpectr  r  
   r"  r#  r$  r  Z	errorTypesuccessrE   ZresolvedQuery)r'  r[  r\  hexr   r   r.   r   rZ   r   r   r   r  r   rR   r(  r)  r*  r   )r   r   Zclient_access_tokenr   Z
session_idr   r,  r-  boundaryr   r.  r  r  r/  rE   r   r   r   recognize_api  s,    
  
v,    "
 *r  )r  NF):rd   
__future__r   r|   rV   rn  r   rs  rp  r   r)  r   r}   r~   r   r  r   r   r[  ry   urllib.errorr   r   urllib.parser   r  r   r   r  ModuleNotFoundErrorr?   r7   r	   r
   
exceptionsr   r   r   r   r   
__author__rA   __license__rf   r   r"   rg   r   r   Zrecognizersr  r  Zrecognize_legacyZrecognize_googleZrecognize_whisper_apiZWavFiler  classmethodr   r   r   r   <module>   sj    t        n

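# A minimal background-listening sketch; it assumes a working default microphone.
# The callback runs on a daemon worker thread, so keep it short and thread-safe:
#
#     def on_phrase(recognizer, audio):
#         print("captured %d bytes" % len(audio.get_raw_data()))
#
#     r = Recognizer()
#     stop_listening = r.listen_in_background(Microphone(), on_phrase)
#     time.sleep(10)
#     stop_listening(wait_for_stop=True)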