import json
import os
import pathlib

import huggingface_hub
import requests
from huggingface_hub import ModelCard
from huggingface_hub.hf_api import ModelInfo
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig

from src.display.utils import EvalQueuedModel


def check_model_card(repo_id: str) -> tuple[bool, str]:
    """Checks if the model card and license exist and have been filled"""
    try:
        card = ModelCard.load(repo_id)
    except huggingface_hub.utils.EntryNotFoundError:
        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."

    # Enforce license metadata
    if card.data.license is None:
        if not ("license_name" in card.data and "license_link" in card.data):
            return False, (
                "License not found. Please add a license to your model card using the `license` metadata or a"
                " `license_name`/`license_link` pair."
            )

    # Enforce card content
    if len(card.text) < 200:
        return False, "Please add a description to your model card, it is too short."

    return True, ""


def is_model_on_hub(
    model_name: str,
    revision: str,
    token: str | None = None,
    trust_remote_code: bool = False,
    test_tokenizer: bool = False,
) -> tuple[bool, str | None, PretrainedConfig | None]:
    """Checks whether model_name exists on the hub and whether it (and, optionally, its tokenizer) can be loaded with the transformers Auto classes"""
    try:
        config = AutoConfig.from_pretrained(
            model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
        )
        if test_tokenizer:
            try:
                AutoTokenizer.from_pretrained(
                    model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
                )
            except ValueError as e:
                return (False, f"uses a tokenizer which is not in a transformers release: {e}", None)
            except Exception:
                return (
                    False,
                    "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?",
                    None,
                )
        return True, None, config

    except ValueError:
        return (
            False,
            "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
            None,
        )

    except OSError as e:
        if "gated repo" in str(e):
            # Notify the maintainers on Slack; skip the notification if the webhook is not configured.
            slack_webhook_url = os.environ.get("SLACK_WEBHOOK_URL")
            if slack_webhook_url:
                text = f"<!channel>\n{model_name} is a gated model! Please submit this model."
                requests.post(slack_webhook_url, json={"text": text}, timeout=10)
            return False, "is a gated model! Please wait.", None
        return False, "was not found on the hub!", None
    except Exception:
        return False, "was not found on the hub!", None


def get_model_size(model_info: ModelInfo, precision: str) -> float:
    """Gets the model size in billions of parameters from the safetensors metadata; unknown sizes are reported as 0"""
    try:
        model_size = round(model_info.safetensors["total"] / 1e9, 3)
    except (AttributeError, KeyError, TypeError):
        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py

    # GPTQ checkpoints pack several low-bit weights per stored element, so the
    # raw safetensors count under-reports the parameter count; scale it back up.
    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
    return size_factor * model_size


def get_model_arch(model_info: ModelInfo):
    """Gets the model architecture from the hub config; returns "Unknown" if it is not set"""
    return (model_info.config or {}).get("architectures", "Unknown")
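

# Usage sketch (requires network access; "gpt2" is only an illustrative public
# repo id, and the printed values are examples, not guarantees):
#
#   from huggingface_hub import HfApi
#
#   info = HfApi().model_info("gpt2")
#   print(get_model_size(info, precision="float16"))  # size in billions of parameters, e.g. 0.137
#   print(get_model_arch(info))                       # e.g. ["GPT2LMHeadModel"]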


def already_submitted_models(requested_models_dir: pathlib.Path) -> set[EvalQueuedModel]:
    """Gather a list of already submitted models to avoid duplicates"""
    queued_models = set()
    for json_path in requested_models_dir.glob("*/*.json"):
        with json_path.open(encoding="utf-8") as f:
            info = json.load(f)
        queued_models.add(
            EvalQueuedModel(
                model=info["model"],
                revision=info["revision"],
                precision=info["precision"],
                add_special_tokens=info["add_special_tokens"],
                llm_jp_eval_version=info["llm_jp_eval_version"],
                vllm_version=info["vllm_version"],
            )
        )
    return queued_models
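

if __name__ == "__main__":
    # Minimal smoke test of the hub checks (assumes network access; "gpt2" is
    # an illustrative public repo id, not an actual leaderboard submission).
    ok, message = check_model_card("gpt2")
    print(f"check_model_card: ok={ok} message={message!r}")

    on_hub, error, _config = is_model_on_hub("gpt2", revision="main", test_tokenizer=True)
    print(f"is_model_on_hub: on_hub={on_hub} error={error!r}")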