from __future__ import annotations

import os
from io import BytesIO

from speech_recognition.audio import AudioData
from speech_recognition.exceptions import SetupError


def recognize_whisper_api(
    recognizer,
    audio_data: "AudioData",
    *,
    model: str = "whisper-1",
    api_key: str | None = None,
):
    """
    Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API.

    This function requires an OpenAI account; visit https://platform.openai.com/signup, then generate API Key in `User settings <https://platform.openai.com/account/api-keys>`__.

    Detail: https://platform.openai.com/docs/guides/speech-to-text

    Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any issues with the openai installation, or the environment variable is missing.
    """
    if not isinstance(audio_data, AudioData):
        raise ValueError("``audio_data`` must be an ``AudioData`` instance")
    if api_key is None and os.environ.get("OPENAI_API_KEY") is None:
        raise SetupError("Set environment variable ``OPENAI_API_KEY``")

    try:
        import openai
    except ImportError:
        raise SetupError(
            "missing openai module: ensure that openai is set up correctly."
        )

    # Wrap the WAV bytes in a named file-like object so the API treats it as a .wav upload.
    wav_data = BytesIO(audio_data.get_wav_data())
    wav_data.name = "SpeechRecognition_audio.wav"

    client = openai.OpenAI(api_key=api_key)
    transcript = client.audio.transcriptions.create(file=wav_data, model=model)
    return transcript.text
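

# --- Usage sketch (not part of the library source) -------------------------
# A minimal example of invoking this recognizer through the high-level
# ``speech_recognition`` API, assuming the ``openai`` package is installed,
# ``OPENAI_API_KEY`` is set in the environment, and a WAV file exists at the
# hypothetical path ``english.wav``:
#
#     import speech_recognition as sr
#
#     recognizer = sr.Recognizer()
#     with sr.AudioFile("english.wav") as source:
#         audio = recognizer.record(source)  # read the entire file into an AudioData
#
#     # Recognizer.recognize_whisper_api delegates to the function defined above.
#     print(recognizer.recognize_whisper_api(audio, model="whisper-1"))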