import glob
import os
from dataclasses import dataclass
from datetime import datetime, timezone

import gradio as gr
import pandas as pd
from datasets import Dataset, load_dataset
from huggingface_hub import HfApi, ModelInfo

OWNER = "AIEnergyScore"
COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
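# Hugging Face Hub token, read from the "DEBUG" environment variable.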
TOKEN = os.environ.get("DEBUG")
API = HfApi(token=TOKEN)

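# Maps the human-readable task names shown in the dropdown to the task filter strings used with the Hub API.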
task_mappings = {
    'Automatic Speech Recognition': 'automatic-speech-recognition',
    'Object Detection': 'object-detection',
    'Text Classification': 'text-classification',
    'Image to Text': 'image-to-text',
    'Question Answering': 'question-answering',
    'Text Generation': 'text-generation',
    'Image Classification': 'image-classification',
    'Sentence Similarity': 'sentence-similarity',
    'Image Generation': 'image-generation',
    'Summarization': 'summarization'
}

@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji

def start_compute_space():
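    """Restart the compute Space so it begins processing newly submitted requests."""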
    API.restart_space(COMPUTE_SPACE)
    gr.Info(f"Okay! {COMPUTE_SPACE} should be running now!")

def get_model_size(model_info: ModelInfo):
    """Return the model's parameter count in billions, read from its safetensors metadata; 0 if unavailable."""
    try:
        total = model_info.safetensors.total  # newer huggingface_hub versions expose a SafeTensorsInfo dataclass
    except AttributeError:
        try:
            total = model_info.safetensors["total"]  # older versions returned a plain dict
        except (AttributeError, TypeError, KeyError):
            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
    return round(total / 1e9, 3)

def add_docker_eval(zip_file):
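    """Upload a user-submitted zip of Docker run logs to the tested_proprietary_models dataset."""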
    new_fid = os.path.basename(zip_file)
    if new_fid.endswith('.zip'):
        API.upload_file(
            path_or_fileobj=zip_file,
            repo_id="AIEnergyScore/tested_proprietary_models",
            path_in_repo='submitted_models/' + new_fid,
            repo_type="dataset",
            commit_message="Adding logs via submission Space.",
            token=TOKEN
        )
        gr.Info('Uploaded logs to the dataset! We will check them and add them to the next version of the leaderboard.')
    else:
        gr.Info('You can only upload .zip files here!')

def add_new_eval(repo_id: str, task: str):
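    """Validate a model/task submission, append it to the requests dataset, and start the compute Space."""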
    model_name = repo_id.split("/")[-1]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
    requests_dset = requests.to_pandas()
    model_list = requests_dset[requests_dset['status'] == 'COMPLETED']['model'].tolist()
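    # Listing every Hub model matching the chosen task filter can take a while for popular tasks.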
    task_models = list(API.list_models(filter=task_mappings[task]))
    task_model_names = [m.id for m in task_models]
    if repo_id in model_list:
        gr.Info('This model has already been run!')
    elif repo_id not in task_model_names:
        gr.Info("This model isn't compatible with the chosen task! Pick a different model-task combination")
    else:
        # Is the model info correctly filled?
        try:
            model_info = API.model_info(repo_id=repo_id)
            model_size = get_model_size(model_info=model_info)
            likes = model_info.likes
        except Exception:
            gr.Info("Could not find information for model %s" % (model_name))
            model_size = None
            likes = None

        gr.Info("Adding request")

        request_dict = {
            "model": repo_id,
            "status": "PENDING",
            "submitted_time": pd.to_datetime(current_time),
            "task": task_mappings[task],
            "likes": likes,
            "params": model_size,
            "leaderboard_version": "v0",
        }

        print("Writing out request file to dataset")
        df_request_dict = pd.DataFrame([request_dict])
        print(df_request_dict)
        df_final = pd.concat([requests_dset, df_request_dict], ignore_index=True)
        updated_dset = Dataset.from_pandas(df_final)
        updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)

        gr.Info("Starting compute space at %s " % COMPUTE_SPACE)
        return start_compute_space()

def get_existing_models():
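    """Return the models from the requests dataset whose benchmark run has COMPLETED."""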
    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
    requests_dset = requests.to_pandas()
    model_df = requests_dset[['model', 'status']]
    model_df = model_df[model_df['status'] == 'COMPLETED']
    return model_df

def highlight_cols(x):
    """Return a same-shaped DataFrame of CSS styles, coloring each row by its status."""
    df = pd.DataFrame('', index=x.index, columns=x.columns)
    df.loc[x['status'] == 'COMPLETED', :] = 'color: green'
    df.loc[x['status'] == 'PENDING', :] = 'color: orange'
    df.loc[x['status'] == 'FAILED', :] = 'color: red'
    return df

# Pre-compute the styled table of benchmarked models shown at the bottom of the page.
existing_models = get_existing_models()
formatted_df = existing_models.style.apply(highlight_cols, axis=None)

def get_leaderboard_models():
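    """Concatenate the energy CSVs from leaderboard_v0_data and return the (model, task) pairs on the leaderboard."""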
    path = 'leaderboard_v0_data/energy'
    filenames = glob.glob(os.path.join(path, "*.csv"))
    data = [pd.read_csv(filename) for filename in filenames]
    leaderboard_data = pd.concat(data, ignore_index=True)
    return leaderboard_data[['model', 'task']]

# A placeholder for get_zip_data_link() -- replace with your actual implementation if available.
def get_zip_data_link():
    return (
        '<a href="https://example.com/download.zip" '
        'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
        'color: black; font-family: \'Inter\', sans-serif;">Download Logs</a>'
    )

with gr.Blocks() as demo:
    # --- Header Links (at the very top, evenly spaced) ---
    gr.HTML("""
        <style>
          .header-link {
             color: black !important;
          }
          @media (prefers-color-scheme: dark) {
             .header-link {
                color: white !important;
             }
          }
        </style>
        <div style="display: flex; justify-content: space-evenly; align-items: center; margin-bottom: 20px;">
            <a class="header-link" href="https://huggingface.co/spaces/AIEnergyScore/leaderboard" style="text-decoration: none; font-weight: bold; font-size: 1.1em; font-family: 'Inter', sans-serif;">Leaderboard</a>
            <a class="header-link" href="https://huggingface.co/spaces/AIEnergyScore/Label" style="text-decoration: none; font-weight: bold; font-size: 1.1em; font-family: 'Inter', sans-serif;">Label Generator</a>
            <a class="header-link" href="https://huggingface.github.io/AIEnergyScore/#faq" style="text-decoration: none; font-weight: bold; font-size: 1.1em; font-family: 'Inter', sans-serif;">FAQ</a>
            <a class="header-link" href="https://huggingface.github.io/AIEnergyScore/#documentation" style="text-decoration: none; font-weight: bold; font-size: 1.1em; font-family: 'Inter', sans-serif;">Documentation</a>
            <a class="header-link" href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" style="text-decoration: none; font-weight: bold; font-size: 1.1em; font-family: 'Inter', sans-serif;">Community</a>
        </div>
    """)


    # --- Logo (centered) ---
    gr.HTML("""
        <div style="margin-top: 0px;">
            <picture style="display: block; margin: 0 auto; max-width: 300px;">
                <source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logodark.png">
                <img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png" 
                     alt="Logo" 
                     style="display: block; margin: 0 auto; max-width: 300px; height: auto;">
            </picture>
        </div>
    """)

    gr.Markdown('<div style="text-align: center;"><h2>Submission Portal</h2></div>')    
    gr.Markdown('<div style="text-align: center;">If you want us to evaluate a model hosted on the 🤗 Hub, enter the model ID and choose the corresponding task from the dropdown list below, then click <strong>Run Analysis</strong> to launch the benchmarking process.</div>')    
    gr.Markdown('<div style="text-align: center;">If you\'ve used the <a href="https://github.com/huggingface/AIEnergyScore/">Docker file</a> to run your own evaluation, please submit the resulting log files at the bottom of the page.</div>')    
    gr.Markdown('<div style="text-align: center;">The <a href="https://huggingface.co/spaces/AIEnergyScore/Leaderboard">Project Leaderboard</a> will be updated on a biannual basis (last updated in February 2025).</div>')    
    
    with gr.Row():
        with gr.Column():
            task = gr.Dropdown(
                choices=list(task_mappings.keys()),
                label="Choose a benchmark task",
                value='Text Generation',
                multiselect=False,
                interactive=True,
            )
        with gr.Column():
            model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
    
    with gr.Row():
        with gr.Column():
            submit_button = gr.Button("Submit for Analysis")
            submission_result = gr.Markdown()
            submit_button.click(
                fn=add_new_eval,
                inputs=[model_name_textbox, task],
                outputs=submission_result,
            )
    
    with gr.Row():
        with gr.Column():
            with gr.Accordion("Submit log files from a Docker run:", open=False):
                gr.Markdown("""
                **⚠️ Warning: By uploading the zip file, you confirm that you have read and agree to the following terms:**
            
                - **Public Data Sharing:** You consent to the public sharing of the energy performance data derived from your submission. No additional information related to this model, including proprietary configurations, will be disclosed.
                - **Data Integrity:** You certify that the log files submitted are accurate, unaltered, and generated directly from testing your model as per the specified benchmarking procedures.
                - **Model Representation:** You affirm that the model tested and submitted is representative of the production-level version, including its level of quantization and any other relevant characteristics impacting energy efficiency and performance.
                """)
                file_output = gr.File(visible=False)
                u = gr.UploadButton("Upload a zip file with logs", file_count="single", interactive=True)
                u.upload(add_docker_eval, u, file_output)

    with gr.Row():
        with gr.Column():
            with gr.Accordion("Models that are in the latest leaderboard version:", open=False, visible=False):
                gr.Dataframe(get_leaderboard_models())
            with gr.Accordion("Models that have been benchmarked recently:", open=False, visible=False):
                gr.Dataframe(formatted_df)

demo.launch()