import os
import sys
from moviepy.editor import VideoFileClip
from pydub import AudioSegment
from pydub.silence import detect_nonsilent
import pandas as pd
import speech_recognition as sr


def transcribe_audio(audio_file):
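    """Transcribe a WAV file with Google Speech Recognition, using Moroccan Arabic ('ar-MA')."""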
    recognizer = sr.Recognizer()

    with sr.AudioFile(audio_file) as source:
        audio = recognizer.record(source)

    try:
        result = recognizer.recognize_google(audio, language='ar-MA', show_all=True)
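        # show_all=True returns the raw API response; an empty result means nothing was recognized,
        # otherwise the first entry under 'alternative' is the best transcription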
        if result and 'alternative' in result:
            best_transcription = result['alternative'][0]['transcript']
            return best_transcription
        return None
    except sr.UnknownValueError:
        print("Could not understand the audio")
        return None
    except sr.RequestError as e:
        print(f"Could not request results from Google Speech Recognition service; {e}")
        return None

def process_video(input_file, min_silence_len=1000, silence_thresh=-36, min_duration=2, use_min_duration=True):
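    """Split a video at its non-silent intervals, transcribe each chunk, and save metadata.

    min_silence_len is in milliseconds, silence_thresh in dBFS, and min_duration in seconds;
    chunks shorter than min_duration are skipped when use_min_duration is True.
    """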
    # Load video and extract audio
    video = VideoFileClip(input_file)
    audio = video.audio

    # Save audio to a temporary WAV file
    audio.write_audiofile("temp_audio1.wav")

    # Load audio using pydub
    audio_segment = AudioSegment.from_wav("temp_audio1.wav")
    os.remove("temp_audio1.wav")

    # Detect non-silent intervals
    nonsilent_intervals = detect_nonsilent(audio_segment, min_silence_len=min_silence_len, silence_thresh=silence_thresh)

    # Create output folder
    video_name = os.path.splitext(os.path.basename(input_file))[0]
    output_folder = f"{video_name}_output"
    os.makedirs(output_folder, exist_ok=True)

    # Cut video into chunks and save metadata
    metadata = []
    part_number = 0
    for start, end in nonsilent_intervals:
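        # pydub reports intervals in milliseconds; convert to seconds for moviepy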
        start_time = start / 1000
        end_time = end / 1000
        duration = end_time - start_time

        # Check if the duration is longer than the minimum duration
        if use_min_duration and duration < min_duration:
            continue

        part_number += 1
        output_file = os.path.join(output_folder, f"{video_name}_part_{part_number}.mp4")

        # Cut video and write to file
        video_chunk = video.subclip(start_time, end_time)
        video_chunk.write_videofile(output_file)

        # Extract audio from the video chunk and transcribe
        audio_chunk = video_chunk.audio
        audio_chunk.write_audiofile("temp_audio_chunk.wav")
        transcription = transcribe_audio("temp_audio_chunk.wav")
        os.remove("temp_audio_chunk.wav")

        metadata.append({
            "filename": output_file,
            "start_at": start_time,
            "end_at": end_time,
            "transcription": transcription,
        })

    # Save metadata to an Excel file (pandas needs openpyxl installed for .xlsx output)
    df = pd.DataFrame(metadata)
    df.to_excel(os.path.join(output_folder, f"{video_name}_metadata.xlsx"), index=False)

    # Release the source video file handle
    video.close()
    print("Video processing completed")

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python script.py input_video.mp4")
        sys.exit(1)

    input_file = sys.argv[1]
    process_video(input_file)