import datetime
import logging
import os
import subprocess
import uuid
from concurrent.futures import ThreadPoolExecutor
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Union

import numpy as np

from django.core.files.storage import default_storage

from pydub import AudioSegment
from moviepy.editor import VideoFileClip, AudioFileClip

from apps.process.models import Status, ProcessedMedia
from apps.process.utils import get_audio_parts

# Module-level logger; use __name__ so log records carry the dotted module
# path instead of a filesystem path (standard logging convention).
logger = logging.getLogger(__name__)


# Constants for the temporary audio directory and the FFmpeg extraction command.
TEMP_AUDIO_DIR     = "media/original_audio"
AUDIO_EXTENSION    = ".mp3"
FFMPEG_EXECUTABLE  = "ffmpeg"
# Template shell command: drops the video stream (-vn) and re-encodes the
# audio track as MP3. NOTE(review): this string is interpolated and run with
# shell=True — a path containing a single quote would break the command (or
# inject into the shell); confirm inputs are trusted.
AFV_FFMPEG_COMMAND = f"{FFMPEG_EXECUTABLE} -hide_banner -i '{{input_file_path}}' -vn -acodec libmp3lame '{{output_file_path}}'"
FFMPEG_OPTIONS     = {"shell": True}


def extract_audio_from_video(video_file_path: str) -> Dict[str, Union[str, AudioSegment]]:
    """
    Extracts the audio track of a video file and returns it as a pydub
    AudioSegment.

    As a side effect, the extracted MP3 is mirrored back into the default
    storage under the same relative path as the local temporary copy, and the
    local copy (plus the temp directory, when empty) is removed.

    Args:
        video_file_path (str): Storage path of the video file.

    Returns:
        dict: {"status": "success", "data": AudioSegment} on success, or
              {"status": "error", "message": str} on failure.
    """
    # Defined up front so the cleanup in `finally` can safely test it even
    # when an exception fires before the path is computed.
    output_path = None
    try:
        # Resolve a URL FFmpeg can read directly from the storage backend.
        _, filename = os.path.split(video_file_path)
        file_url = default_storage.url(video_file_path)

        # Ensure the temporary output directory exists.
        os.makedirs(TEMP_AUDIO_DIR, exist_ok=True)
        output_path = os.path.join(TEMP_AUDIO_DIR, filename.replace(".mp4", AUDIO_EXTENSION))

        # Run FFmpeg to extract the audio. check=True makes a non-zero exit
        # raise CalledProcessError — without it, the handler below could
        # never trigger.
        cmd = AFV_FFMPEG_COMMAND.format(input_file_path=file_url, output_file_path=output_path)
        subprocess.run(cmd, check=True, **FFMPEG_OPTIONS)

        # Load the extracted file with pydub.
        extracted_audio = AudioSegment.from_file(output_path)

        return {"status": "success", "data": extracted_audio}
    except subprocess.CalledProcessError as e:
        error_message = f"FFmpeg failed with error code {e.returncode}: {e.output}"
        logger.error(error_message)
        return {"status": "error", "message": "FFmpeg failed to extract audio"}
    except Exception as e:
        error_message = f"Failed to extract audio from video file: {e}"
        logger.error(error_message)
        return {"status": "error", "message": error_message}
    finally:
        # Mirror the local MP3 into the default storage, then delete the
        # local copy. Guarded so an early failure does not raise NameError /
        # FileNotFoundError from the cleanup itself.
        if output_path and os.path.exists(output_path):
            with default_storage.open(output_path, 'wb+') as destination:
                with open(output_path, 'rb') as file_data:
                    destination.write(file_data.read())
            os.remove(output_path)
        # Remove the temporary directory if it is now empty.
        if os.path.exists(TEMP_AUDIO_DIR) and not os.listdir(TEMP_AUDIO_DIR):
            os.rmdir(TEMP_AUDIO_DIR)
			

def audiosegment_to_librosawav(sound: AudioSegment) -> dict:
    """
    Converts a stereo AudioSegment into a float32 numpy array scaled to
    [-1.0, 1.0], shaped (num_samples, 2).

    Args:
        sound (AudioSegment): The stereo audio to convert.

    Returns:
        dict: {"status": "success", "audio": np.ndarray} on success, or
              {"status": "error", "message": str} on failure.
    """
    try:
        # The conversion below assumes exactly two mono channels.
        if sound.channels != 2:
            # logger.error, not logger.exception: there is no active
            # exception here, so .exception would log a bogus traceback.
            logger.error("Only stereo audio is supported.")
            return {"status": "error", "message": "Only stereo audio is supported."}

        logger.info(f"Converting AudioSegment to librosa format with sample rate {sound.frame_rate} Hz and {sound.sample_width*8} bits per sample.")
        # Split into per-channel mono segments and pull the raw samples.
        channel_sounds = sound.split_to_mono()
        samples = [s.get_array_of_samples() for s in channel_sounds]
        # Stack the channels as columns, then normalise by the max value of
        # the integer sample type so the result lies in [-1.0, 1.0].
        fp_arr = np.array(samples).T.astype(np.float32)
        fp_arr /= np.iinfo(samples[0].typecode).max
        logger.info("Conversion successful.")
        return {"status": "success", "audio": fp_arr}
    except ValueError as e:
        logger.exception("Error during conversion.")
        return {"status": "error", "message": str(e)}
    except Exception:
        logger.exception("Unknown error occurred.")
        return {"status": "error", "message": "Unknown error occurred."}


def remove_sound(audio: AudioSegment, part: list, MLmodel, save: bool) -> dict:
    """
    Strips the background music from one slice of an audio file.

    Args:
        audio (AudioSegment): Full audio to take the slice from.
        part (list): [start_ms, end_ms] bounds of the slice to process.
        MLmodel: Source-separation model exposing a ``split(waveform)`` method.
        save (bool): When True, export the slice and both separated stems
            under ``media/music_output``.

    Returns:
        dict: On success, the processed part plus its "voice" and
        "background" stems; on failure, {"status": "error", "message": ...}.
    """
    try:
        # Slice out the requested window of the source audio.
        extracted_audio = audio[part[0]:part[1]]

        # Convert the slice to the float array layout used downstream.
        converted = audiosegment_to_librosawav(extracted_audio)
        if converted["status"] == "error":
            return converted

        # Transpose (samples, channels) -> (channels, samples) before
        # handing the waveform to the model.
        waveform = converted["audio"].T
        voice_audio, background_audio = MLmodel.split(waveform)

        # Optionally dump the slice and both stems to disk for inspection.
        if save:
            if not os.path.exists("media/music_output"):
                os.mkdir("media/music_output")
            extracted_audio.export(f"media/music_output/extracted_audio_[{part}].wav", format="wav")
            voice_audio.export(f"media/music_output/voice_audio_[{part}].wav", format="wav")
            background_audio.export(f"media/music_output/background_audio_[{part}].wav", format="wav")

        return {
            "status": "success",
            "part": part,
            "voice": voice_audio,
            "background": background_audio,
        }
    except Exception as e:
        return {"status": "error", "message": str(e)}


def process_results(results):
    """
    Scans a list of result dictionaries and reports whether they all
    succeeded.

    Args:
        results (list): Result dicts, each carrying a "status" key (and a
            "message" key when status is "error").

    Returns:
        bool: True when no result has status "error", False otherwise.
    """
    for outcome in results:
        if outcome["status"] != "error":
            continue
        # First failure wins: log it and stop scanning the rest.
        logger.error(f"Error processing result: {outcome['message']}")
        logger.warning("There were errors processing some results.")
        return False

    logger.info("All results were processed successfully.")
    return True


def process_audio(audio_data: AudioSegment, all_parts: List[tuple], parts_list: List[tuple], results: List[Dict]) -> Dict[str, Any]:
    """
    Rebuilds a full audio track by concatenating processed voice parts with
    the untouched stretches of the original audio.

    Args:
        audio_data (AudioSegment): The original audio.
        all_parts (List[tuple]): [start_ms, end_ms] spans covering the whole
            track, in playback order.
        parts_list (List[tuple]): The subset of spans that were processed.
        results (List[Dict]): Voice-separation output; each dict holds a
            "part" span and the matching "voice" float array in [-1, 1].

    Returns:
        Dict[str, Any]: {"status": "success", "audio": AudioSegment, ...} on
        success, or {"status": "error", "message": str} on bad argument
        types or a processing failure.
    """
    # Validate argument types up front so failures are explicit.
    if not isinstance(audio_data, AudioSegment):
        return {"status": "error", "message": "Argument `audio_data` must be of type `pydub.AudioSegment`."}
    if not isinstance(all_parts, list) or not isinstance(parts_list, list) or not isinstance(results, list):
        return {"status": "error", "message": "Arguments `all_parts`, `parts_list`, and `results` must be of type `list`."}

    # Accumulator for the reassembled track.
    audiofinal = AudioSegment.empty()

    try:
        for part in all_parts:
            if part in parts_list and results:
                # Processed span: append the separated voice for this part.
                for result in results:
                    if result.get("part") == part:
                        # Scale floats in [-1, 1] back to 16-bit PCM and wrap
                        # them in an AudioSegment at the source frame rate.
                        pcm = np.int16(result["voice"] * (2**15))
                        audiofinal += AudioSegment(pcm.tobytes(),
                                                   frame_rate=audio_data.frame_rate,
                                                   sample_width=pcm.dtype.itemsize,
                                                   channels=2)
            else:
                # Unprocessed span (or no results at all): keep the original
                # audio so the output timeline stays continuous.
                audiofinal += audio_data[part[0]:part[1]]

        logger.info("Audio processing completed successfully.")
        return {"status": "success", "message": "Audio processing completed successfully.", "audio": audiofinal}
    except Exception as e:
        # Surface any failure as an error dict, matching the module style.
        return {"status": "error", "message": f"Error occurred: {e}"}


def replace_audio(video_file_path: str, audio: AudioSegment):
    """
    Replaces the audio of a video file with an AudioSegment object.

    The video is loaded straight from the default-storage URL; the
    replacement audio is written to a temporary WAV file so moviepy can load
    it as a clip.

    Parameters:
    video_file_path (str): The path of the video file.
    audio (AudioSegment): The AudioSegment object to replace the audio with.

    Returns:
    VideoFileClip: The new video clip with the replaced audio.

    Raises:
    FileNotFoundError: If the video file does not exist.
    """

    # Get URL of video file from S3 storage
    video_url = default_storage.url(video_file_path)
    # Load the video clip from the URL
    video_clip = VideoFileClip(video_url)
    # Export the audio to a temporary file (delete=False so the file survives
    # the `with` block for AudioFileClip to open)
    with NamedTemporaryFile(suffix='.wav', delete=False) as f:
        audio.export(f.name, format='wav')
        audio_temp_path = f.name
    # Load the audio file as a clip
    audio_clip = AudioFileClip(audio_temp_path)
    # Replace the audio in the video clip with the provided audio clip
    video_clip = video_clip.set_audio(audio_clip)
    # Remove the temporary file
    # NOTE(review): the WAV is unlinked before the caller renders the clip
    # (write_videofile happens later in start_removing_sounds); moviepy may
    # still need to read this file at render time — confirm this does not
    # break rendering.
    os.unlink(audio_temp_path)
    # Return the new video clip with the replaced audio
    return video_clip


def replace_audio_ffmpeg(video_file_path: str, audio: AudioSegment):
    """
    Replaces the audio of a video file with an AudioSegment object using
    FFmpeg (video stream copied, audio re-encoded as AAC).

    Parameters:
    video_file_path (str): The path of the video file in the S3 bucket.
    audio (AudioSegment): The AudioSegment object to replace the audio with.

    Returns:
    str: Path to the output temporary video file. The caller is responsible
         for deleting it.

    Raises:
    RuntimeError: If the FFmpeg command exits with a non-zero status.
    """
    video_suffix = os.path.splitext(video_file_path)[1]
    # Download the video from storage into a local temporary file.
    with NamedTemporaryFile(suffix=video_suffix, delete=False) as video_temp:
        with default_storage.open(video_file_path, 'rb') as video_file:
            video_temp.write(video_file.read())
        video_temp_path = video_temp.name

    # Write the replacement audio to a temporary WAV file.
    with NamedTemporaryFile(suffix='.wav', delete=False) as audio_temp:
        audio.export(audio_temp.name, format='wav')
        audio_temp_path = audio_temp.name

    # Reserve a temporary path for the output video.
    with NamedTemporaryFile(suffix=video_suffix, delete=False) as output_temp:
        output_temp_path = output_temp.name

    # Build the FFmpeg command. -y is required because NamedTemporaryFile has
    # already created the output file; without it FFmpeg would prompt for
    # confirmation (and hang or abort in a non-interactive run).
    ffmpeg_command = [
        'ffmpeg',
        '-hide_banner',
        '-y',                    # Overwrite the pre-created output file
        '-i', video_temp_path,
        '-i', audio_temp_path,
        '-c:v', 'copy',          # Copy video codec
        '-c:a', 'aac',           # AAC audio codec
        '-strict', 'experimental',
        '-map', '0:v:0',         # Video stream from the first input
        '-map', '1:a:0',         # Audio stream from the second input
        output_temp_path
    ]

    try:
        # Run the FFmpeg command; check=True raises on non-zero exit.
        subprocess.run(ffmpeg_command, check=True)
    except subprocess.CalledProcessError as e:
        # Drop the (incomplete) output before reporting the failure.
        if os.path.exists(output_temp_path):
            os.unlink(output_temp_path)
        raise RuntimeError(f"FFmpeg command failed with error: {e}")
    finally:
        # Clean up both input temp files whether or not FFmpeg succeeded
        # (the original leaked the downloaded video copy).
        os.unlink(audio_temp_path)
        os.unlink(video_temp_path)

    return output_temp_path

def start_removing_sounds(MLmodel, parts_list: list, video_path: str, media_id) -> None:
    """
    End-to-end pipeline that removes background sounds/music from a video.

    Steps: extract the audio track, run the ML separation over each requested
    part in a thread pool, stitch processed and untouched parts back together,
    render a new video with the rebuilt audio, upload it to the default
    storage, and record the outcome on the ProcessedMedia row.

    Args:
        MLmodel: Source-separation model handed through to remove_sound.
        parts_list (list): [start_ms, end_ms] spans to clean; an empty list
            means "process the whole track".
        video_path (str): Storage path of the source video.
        media_id: Primary key of the ProcessedMedia record to update.
    """
    start_time = datetime.datetime.now()
    # media Object to get all info and use it to save processed media
    processedMedia  = ProcessedMedia.objects.get(id = media_id)
    print(f"Reciving File : {video_path}")
    # Pull the audio track out of the video (also mirrors the MP3 to storage).
    audio_bytes = extract_audio_from_video(video_path) 
    error_message=""
    # Only continue when extraction succeeded; otherwise mark the job Failed.
    if audio_bytes['status'] == 'success':
        # Get Audio Bytes from audio_bytes
        audio_data = audio_bytes['data'] 
        # An explicitly empty (but non-None) parts list means "whole track".
        if parts_list is not None and parts_list == []:
            # Create A New Parts List Of The Audio File With librosa.
            # parts_list = detect_nonsilent(audio, min_silence_len=1000, silence_thresh=-20, seek_step=1)
            parts_list= [[int(0),len(audio_data)]] 
        # Fan the parts out to a thread pool; each part is separated independently.
        #? TODO : Depends On CPU & RAM :: Need To Do Test With GPU
        with ThreadPoolExecutor() as executor:
            # Create a list of futures.
            futures = [executor.submit(remove_sound, audio_data, part, MLmodel, False) for part in parts_list]
            # Wait for the results to come back.
            results = [future.result() for future in futures] 
        # Mark the job Failed if any part reported an error.
        if process_results(results):
            # Create a parts list for audio based on the part_list attribute
            all_parts = get_audio_parts(len(audio_data), parts_list) 
            # Generate New Audio Object by Combining Processed Parts And Unprocessed Parts
            generate_data = process_audio(audio_data, all_parts, parts_list, results)
            # Add Check for Combining Processed Parts And Unprocessed Parts
            if generate_data['status'] == 'success': 
                #? TODO : Get Final Video With New Audio
                # new_video = replace_audio_ffmpeg(video_path, generate_data['audio'])
                new_video = replace_audio(video_path, generate_data['audio'])
                #? TODO : handle error 
                #? TODO : Save Final Video To S3 Bucket
                # Storage key: short random prefix (first uuid4 segment) + original file name.
                video_file_name = f"generated_videos/{(str(uuid.uuid4()).split('-'))[0]}_{os.path.split(video_path)[1]}"
                #? TODO :  save the new video in S3 Bucket 
                
                # # Save the new video using default_storage
                # with default_storage.open(video_file_name, 'wb+') as destination:
                #     with open(new_video, 'rb') as file_data:
                #         destination.write(file_data.read())
                
                # # Clean up temporary files
                # os.unlink(new_video)

                # Render the clip to a local temp file, then stream its bytes into storage.
                with default_storage.open(video_file_name, 'wb+') as destination: 
                    with NamedTemporaryFile(suffix='.mp4', delete=False) as f:
                        new_video.write_videofile(f.name, codec="libx264", audio_codec='aac', fps=new_video.fps, remove_temp=True, preset="ultrafast", threads=4, ffmpeg_params=["-crf", "23"], write_logfile=False) 
                        file_data = open(f.name, 'rb')
                        destination.write(file_data.read()) 
                        file_data.close() 
                    # Remove the temporary file
                    os.unlink(f.name)
                
                print("[Test Mode]|< Upload Completed >|[Info: Check S3 Bucket For File.]")
                # Update ProcessedMedia variable :> Saving Video In DataBase 
                processedMedia.status = Status.Completed
                processedMedia.video_path = video_file_name
                # processedMedia.audio_path = new_audio_path
            else:
                processedMedia.status = Status.Failed
                error_message = generate_data['message']
                
        else:
            processedMedia.status = Status.Failed 
    else:
        processedMedia.status = Status.Failed
        error_message = audio_bytes['message']
    print(error_message)
    # TODO : handle error 
    processedMedia.save() 
    end_time = datetime.datetime.now()
    print(f"Time taken: {(end_time - start_time).total_seconds()} seconds")