import os
import cv2
import time
import json
import psutil
import logging
import datetime
import functools
import dicttoxml
import mimetypes

try:
    import pandas as pd
except ImportError:
    print("pandas module not found, installing it")
    import subprocess
    import sys
    # Install pandas into the interpreter that is currently running this script
    subprocess.check_call([sys.executable, "-m", "pip", "install", "pandas"])
    import pandas as pd
    print("Installation complete.")

# Set the logging level (debug, info, warning, error, critical)
logging.basicConfig(level=logging.DEBUG)
# Create a logger object
logger = logging.getLogger()
 
# Create a file handler for the logger
file_handler = logging.FileHandler("aws_metadata_test2.log")
# Create a formatter for the file handler
formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] :: %(message)s")
file_handler.setFormatter(formatter)
# Add the file handler to the logger
logger.addHandler(file_handler)
# Remove the default console handler added by basicConfig so logs go only to the file
logger.removeHandler(logging.getLogger().handlers[0])


#🚀 working good
def execute_time(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = datetime.datetime.now()
        result = func(*args, **kwargs)
        end_time = datetime.datetime.now()
        logger.info(f"Function '{func.__name__}' called with arguments: {args} {kwargs}")
        logger.info(f'Time taken by {func.__name__}: {(end_time - start_time).total_seconds()} seconds')
        return result
    return wrapper
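
# With the logging setup above, each decorated call writes lines like the following to
# aws_metadata_test2.log (timestamps and durations here are purely illustrative):
#   [2023-01-01 12:00:00,000][root][INFO] :: Function 'seconds_to_hhmmss' called with arguments: (65,) {}
#   [2023-01-01 12:00:00,000][root][INFO] :: Time taken by seconds_to_hhmmss: 0.0001 seconds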

#🚀 working good
@execute_time
def seconds_to_hhmmss(seconds):
    return (datetime.datetime(1,1,1) + datetime.timedelta(seconds=seconds)).strftime("%H:%M:%S.%f")[:-3]
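
# Example (illustrative): seconds_to_hhmmss(3661.5) returns "01:01:01.500".
# Values of 86400 seconds or more roll over into the day component, so only the
# time-of-day part is kept.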

#🚀 working good
@execute_time
def is_file_open(filename):
    for proc in psutil.process_iter():
        try:
            open_files = proc.open_files()
            if open_files:
                for file in open_files:
                    if file.path == filename:
                        return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # The process may have exited or may not allow inspection; skip it
            continue
    return False

#🚀 working good
@execute_time
def wait_until_file_not_open(filename):
    while is_file_open(filename):
        time.sleep(1)

#🚀 working good
@execute_time
def combine_json_files(folder_path, total_frames, interval, save_xml):
    while True:
        # initialize an empty list to store the contents of the JSON files
        json_files = []
        # loop through all files in the folder
        for filename in os.listdir(folder_path):
            # print(filename)
            # check if the file is a JSON file
            if filename.endswith(".json"):
                # read the contents of the file
                with open(os.path.join(folder_path, filename), "r") as file:
                    json_files.append(json.load(file))
        # check whether all per-frame JSON files have been written: (total_frames // interval) + 1 are expected
        if len(json_files) == (total_frames // interval) + 1:

            file_name = "_".join((folder_path.split("/"))[-2:])
            json_files = sorted(json_files, key=lambda x: x["frame_id"])
            # combine the contents of the JSON files into a single dictionary
            # write the combined JSON to a file
            with open(file_name + ".json", "w") as file:
                json.dump(json_files, file)

            if save_xml :
                xml = dicttoxml.dicttoxml(json_files)
                with open(file_name + ".xml", "wb") as file:
                    file.write(xml) 
            break
        else:
            # wait for 1 millisecond before checking again
            time.sleep(0.001)

#🚀 working good
@execute_time
def json_to_csv(json_file_path):
    # Load the JSON file
    df = pd.read_json(json_file_path)
    # Get the directory and base name of the input file
    dir_name, base_name = os.path.split(json_file_path)
    # Replace the extension with .csv
    csv_file_path = os.path.join(dir_name, os.path.splitext(base_name)[0] + ".csv")
    # Convert to CSV
    df.to_csv(csv_file_path, index=False)
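
# The per-frame JSON records consumed below are assumed to have the following shape
# (inferred from the fields accessed in get_statistics and draw_boxes; it mirrors the
# layout of an AWS Rekognition DetectLabels response). All values shown are illustrative:
#
#   {
#       "frame_id": 0,
#       "frame_time": "00:00:00.000",
#       "response": [
#           {
#               "Name": "Person",
#               "Confidence": 99.0,
#               "Instances": [{"BoundingBox": {"Left": 0.1, "Top": 0.2, "Width": 0.3, "Height": 0.4}}],
#               "Parents": [{"Name": "..."}],
#               "Aliases": [{"Name": "..."}],
#               "Categories": [{"Name": "..."}]
#           }
#       ]
#   }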

#🚀 working good
@execute_time
def get_statistics(file_path, save_xml):

    data = []
    results = []
    with open(file_path, "r") as file:
        data = json.load(file)
    
    for frame in data:
        object_count     = {}
        # confidence_sum   = {}
        parents_count    = {}
        # aliases_count    = {}
        categories_count = {}

        if "response" in frame and frame['response']: 
            for label in frame['response']:
                if "Name" in label and label['Name']:
                    #  Object count
                    if "Instances" in label and label['Instances'] and len(label['Instances']) > 0:
                        object_count[label['Name']] = object_count.get(label['Name'], 0) + len(label['Instances'])
                    else:
                        object_count[label['Name']] = object_count.get(label['Name'], 0) + 1

                    # # confidence average
                    # confidence_sum[label['Name']] = confidence_sum.get(label['Name'], 0) + label['Confidence']

                    # parents count
                    for parent in label.get('Parents', []):
                        parents_count[parent['Name']] = parents_count.get(parent['Name'], 0) + 1

                    # categories count
                    for category in label.get('Categories', []):
                        categories_count[category['Name']] = categories_count.get(category['Name'], 0) + 1

                    # # aliases count
                    # for aliases in label['Aliases']:
                    #     aliases_count[aliases['Name']] = aliases_count.get(aliases['Name'], 0) + 1

            results.append({
                "frame_id": frame['frame_id'],
                "frame_time": frame['frame_time'],
                "object_count": object_count,
                "parents_count": parents_count,
                "categories_count": categories_count,
                # "confidence_sum": confidence_sum,
                # "aliases_count": aliases_count,
            }) 
    # 
    base, _ = os.path.splitext(file_path)
    file_name = "_".join([base, "statistics"])
    with open(file_name + ".json", "w") as file:
        json.dump(results, file)
    # 
    if save_xml :
        xml = dicttoxml.dicttoxml(results)
        with open(file_name + ".xml", "wb") as file:
            file.write(xml) 
        
#🚀 working good
@execute_time
def draw_boxes(response, frame):
    height, width, _ = frame.shape
    for label in response:
        # print()
        # print(f"Name : {label['Name']}") 
        # print(f"Confidence : {label['Confidence']}") 
        # print(f"Parents : {label['Parents']}") 
        # print(f"Aliases : {label['Aliases']}") 
        # print(f"Categories : {label['Categories']}") 
        if "Instances" in label and label['Instances']:
            for instances in label['Instances']:
                if 'BoundingBox' in instances and instances['BoundingBox']:
                    box  = instances["BoundingBox"]
                    x    = width * box['Left']
                    y    = height * box['Top']
                    w    = width * box['Width']
                    h    = height * box['Height']
                    # text = f"{label['Name']} | {label['Confidence']}"
                    text = f"{label['Name']}"
                    # print(f"BoundingBox : [ Left:{x}, Top:{y}, Width:{w}, Height:{h} ]") 
                    cv2.rectangle(frame, (int(x), int(y)), (int(x) + int(w), int(y) + int(h)), (255, 0, 0), 2)
                    cv2.putText(frame, text, (int(x), int(y)-8), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
    return frame

#🚀 working good
@execute_time
def get_outputs(file_path, json_path):
    file_name, file_ext = os.path.splitext(file_path)
    output_path = "_".join([file_name, "output", datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")]) + file_ext
    #
    data = []
    with open(json_path, "r") as file:
        data = json.load(file)
    #
    input_type = mimetypes.guess_type(file_path)[0] or ""
    if input_type.startswith("image") and len(data) == 1:
        # Load the image
        image = cv2.imread(file_path)
        image = draw_boxes(data[0]["response"], image)
        cv2.imwrite(output_path, image)
    elif input_type.startswith("video") and len(data) >= 1 :
        # Read the video file into memory
        video = cv2.VideoCapture(file_path)
        # Get the frame rate
        frame_rate = video.get(cv2.CAP_PROP_FPS)
        # Get the frame size of the video
        frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        # Define the codec and create a video writer object
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, frame_rate, frame_size, isColor=True)

        # # Extract the frame_id values into a list
        # frame_ids = [item["frame_id"] for item in data]
        
        frame_number = 0
        # Loop over each frame in the video
        while True:
            ret, frame = video.read()
            if not ret:
                break
            # Filter the data for the target frame_id
            target_data = [item for item in data if item["frame_id"] == frame_number]
            if target_data:
                # Draw the detections for this frame before writing it out
                frame = draw_boxes(target_data[0]["response"], frame)
            out.write(frame)
            frame_number += 1
        
        # Close the output video file
        out.release()
        # Close the video file
        video.release()
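
# A minimal usage sketch showing how these helpers could be chained together.
# The paths, frame count and interval below are hypothetical placeholders, not values
# taken from the original pipeline.
if __name__ == "__main__":
    folder_path  = "output/sample_video"   # hypothetical folder holding the per-frame JSON files
    input_file   = "sample_video.mp4"      # hypothetical input video
    total_frames = 300                     # hypothetical total frame count of the video
    interval     = 10                      # hypothetical sampling interval used upstream

    # Merge the per-frame JSON files into a single file (and optionally an XML copy)
    combine_json_files(folder_path, total_frames, interval, save_xml=False)
    combined_json = "_".join(folder_path.split("/")[-2:]) + ".json"

    # Derive per-frame statistics and a CSV view of the combined results
    get_statistics(combined_json, save_xml=False)
    json_to_csv(combined_json)

    # Render the detections back onto the input video
    get_outputs(input_file, combined_json)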