import streamlit as st
import cv2
import numpy as np
import tempfile
import os
# import easyocr
from PIL import Image, ImageDraw, ImageFont
from deep_translator import GoogleTranslator
import base64
from paddleocr import PaddleOCR

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

os.environ["HUGGINGFACEHUB_API_KEY"] = os.getenv("HF")
os.environ["HF_TOKEN"] = os.getenv("HF")

st.set_page_config(
    page_title="MediAssist - Prescription Analyzer",
    layout="wide",
    page_icon="💊"
)


def set_background(image_file):
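    """Inject a full-page background image (embedded as a base64 data URI) and shared CSS styles."""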
    with open(image_file, "rb") as image:
        encoded = base64.b64encode(image.read()).decode()
    st.markdown(
        f"""
        <style>
        .stApp {{
            background-image: linear-gradient(rgba(0, 0, 0, 0.4), rgba(0, 0, 0, 0.4)),
                              url("data:image/jpg;base64,{encoded}");
            background-size: cover;
            background-repeat: no-repeat;
            background-attachment: fixed;
        }}
        .main-title {{
            color: #ffffff;
            text-align: center;
            font-size: 3em;
            font-weight: 900;
            margin-bottom: 0.2em;
            text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.7);
        }}
        .subtitle {{
            color: #f0f0f0;
            text-align: center;
            font-size: 1.4em;
            margin-bottom: 0.5em;
            text-shadow: 1px 1px 3px rgba(0, 0, 0, 0.6);
        }}
        .quote {{
            color: #eeeeee;
            text-align: center;
            font-style: italic;
            font-size: 1.2em;
            margin-bottom: 2em;
            text-shadow: 1px 1px 3px rgba(0, 0, 0, 0.6);
        }}
        h1, h3 {{
            color: #ffffff !important;
            text-shadow: 2px 2px 5px rgba(0, 0, 0, 0.8);
            font-weight: 800;
        }}
        .stButton>button {{
            width: 100%;
            font-size: 1.1em;
            padding: 0.8em;
            border-radius: 10px;
        }}
        </style>
        """,
        unsafe_allow_html=True
    )

# Split large response into smaller chunks (for translation)
# def split_text_into_chunks(text, max_length=450):
#     lines = text.split('\n')
#     chunks = []
#     current = ""
#     for line in lines:
#         if len(current) + len(line) + 1 <= max_length:
#             current += line + '\n'
#         else:
#             chunks.append(current.strip())
#             current = line + '\n'
#     if current:
#         chunks.append(current.strip())
#     return chunks


def save_text_as_image(text, file_path):
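    """Render plain text line by line onto a white PIL image and save it to file_path."""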
    font = ImageFont.load_default()
    lines = text.split('\n')
    # Size the canvas from the widest line; getbbox returns (left, top, right, bottom)
    max_width = max(font.getbbox(line)[2] for line in lines) + 20
    line_height = max(font.getbbox(line)[3] for line in lines) + 10
    img_height = line_height * len(lines) + 20

    img = Image.new("RGB", (max_width, img_height), "white")
    draw = ImageDraw.Draw(img)
    y = 10
    for line in lines:
        draw.text((10, y), line, font=font, fill="black")
        y += line_height

    img.save(file_path)
    return file_path


set_background("background_img.jpg")

# # OCR
# @st.cache_resource
# def load_easyocr_reader():
#     return easyocr.Reader(['en'])

st.sidebar.title("💊 MediAssist")
st.sidebar.markdown("Analyze prescriptions with ease using AI")
st.sidebar.markdown("---")
st.sidebar.markdown("🔗 **Connect with me:**")
st.sidebar.markdown("""
<div style='display: flex; gap: 10px;'>
    <a href="https://github.com/Yashvj22" target="_blank">
        <img src="https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white" style="height:30px;">
    </a>
    <a href="https://www.linkedin.com/in/yash-jadhav-454b0a237/" target="_blank">
        <img src="https://img.shields.io/badge/LinkedIn-0A66C2?style=for-the-badge&logo=linkedin&logoColor=white" style="height:30px;">
    </a>
</div>
""", unsafe_allow_html=True)
st.sidebar.markdown("---")

st.markdown("""
    <h1 style='text-align: center; color: #4A90E2;'>🧠 MediAssist</h1>
    <h3 style='text-align: center;'>Prescription Analyzer using AI and OCR</h3>
    <p style='text-align: center;'>Upload a doctor's prescription image, and MediAssist will extract, translate, and explain it for you.</p>
    <br>
""", unsafe_allow_html=True)

uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])

if uploaded_file:
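    # Persist the upload to a temporary PNG on disk so OpenCV can read it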
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
        temp_file.write(uploaded_file.read())
        orig_path = temp_file.name

    # Image preprocessing
    image = cv2.imread(orig_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
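    # Invert-binarise so ink becomes white on black, then dilate to thicken strokes for OCR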
    _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(binary_inv, kernel, iterations=1)

    dilated_path = orig_path.replace(".png", "_dilated.png")
    cv2.imwrite(dilated_path, dilated)

    
    # Cache the OCR model so it is not re-initialised on every Streamlit rerun
    @st.cache_resource
    def load_ocr():
        return PaddleOCR(use_angle_cls=True, lang='en')  # use_angle_cls handles rotated text

    result = load_ocr().ocr(dilated_path, cls=True)
    # result[0] is None when no text is detected; keep only the recognised strings
    text_list = [line[1][0] for line in (result[0] or [])]
    text = "\n".join(text_list)

    # reader = easyocr.Reader(['en'])
    # text_list = reader.readtext(dilated, detail=0)
    # text = "\n".join(text_list)

    col1, col2 = st.columns([1, 2])
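    # Left column: preprocessed image preview; right column: the raw extracted text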
    with col1:
        st.image(dilated, caption="🧾 Preprocessed Prescription", use_container_width=True)
    with col2:
        st.success("✅ Image Uploaded and Preprocessed")
        st.markdown("#### 📝 Extracted Text")
        st.code(text)

    # Prompt LLM
    template = """
    You are a helpful and structured medical assistant.
    
    Below is a prescription text extracted from an image:
    
    {prescription_text}
    
    Your tasks:
    
    1. Identify and list only the medicine names mentioned (ignore other irrelevant text).
    2. For each identified medicine, provide the following:
       - Dosage and Timing
       - Possible Side Effects
       - Special Instructions
    
    🧾 Format your response clearly and neatly as follows:
    
    - Medicine Name 1
      - Dosage and Timing: ...
      - Side Effects: ...
      - Special Instructions: ...
    
    - Medicine Name 2
      - Dosage and Timing: ...
      - Side Effects: ...
      - Special Instructions: ...
    
    Ensure each medicine starts a new bullet point, keep every detail on its own line, and do not bold any of the text.
    """
    
    prompt = PromptTemplate(input_variables=["prescription_text"], template=template)

    # Qwen3 served through the Hugging Face Inference Providers API (Nebius backend)
    llm_model = HuggingFaceEndpoint(
        repo_id="Qwen/Qwen3-235B-A22B",
        provider="nebius",
        temperature=0.6,
        max_new_tokens=300,
        task="conversational"
    )

    # ChatHuggingFace only needs the wrapped endpoint; generation settings live on the endpoint itself
    llm = ChatHuggingFace(llm=llm_model)
    
    chain = LLMChain(llm=llm, prompt=prompt)

    filtered_output = ""
    hindi_text = ""

    if st.button("🔍 Analyze Extracted Text"):
        with st.spinner("Analyzing with LLM..."):
            response = chain.run(prescription_text=text)
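            # Qwen3 may wrap its reasoning in <think>...</think>; keep only the text after the closing tag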
            parts = response.split("</think>")

            if len(parts) > 1:
                filtered_output = parts[1].strip()
            else:
                filtered_output = response.strip()
            
        st.markdown("#### 💡 AI-based Medicine Analysis")
        st.text_area("LLM Output", filtered_output, height=300)

        # Save txt and image
        txt_path = "medicine_analysis.txt"
        with open(txt_path, "w", encoding="utf-8") as f:
            f.write(filtered_output)

        img_path = "medicine_analysis.png"
        save_text_as_image(filtered_output, img_path)

        st.markdown("#### 📥 Download (English)")
        col1, col2 = st.columns(2)
        with col1:
            st.download_button("⬇️ English TXT", data=filtered_output.encode(), file_name="medicine_analysis.txt")
        with col2:
            with open(img_path, "rb") as img_file:
                st.download_button("🖼️ English Image", data=img_file, file_name="medicine_analysis.png", mime="image/png")


        
    # if filtered_output and st.button("🌐 Translate to Hindi"):
    #     with st.spinner("Translating to Hindi..."):
    
    #         def clean_text(text):
    #             text = text.replace("•", "-")  # Replace bullets
    #             text = re.sub(r"\s{2,}", " ", text)  # Remove extra spaces
    #             text = re.sub(r"[^\w\s,.:-]", "", text)  # Keep only safe characters
    #             return text
    
    #         cleaned_output = clean_text(filtered_output)
    
    #         try:
    #             hindi_text = GoogleTranslator(source='en', target='hi').translate(cleaned_output)
    #         except Exception as e:
    #             hindi_text = "[Translation failed]"
    
    #         # Formatting translated text
    #         formatted_text = re.sub(r'(?<=\s)-\s', r'\n- ', hindi_text)
    
    #         # Add line breaks before keywords
    #         keywords = ["खुराक और समय", "साइड इफेक्ट्स", "विशेष निर्देश"]
    #         for kw in keywords:
    #             formatted_text = formatted_text.replace(f"- {kw}", f"\n  - {kw}")
    
    #         final_text = formatted_text.strip()
    
    #     st.markdown("#### 🌐 Hindi Translation")
    #     st.text_area("Translated Output (Hindi)", value=final_text, height=300)
    
    #     hindi_img_path = "hindi_output.png"
    #     save_text_as_image(final_text, hindi_img_path)
    
    #     st.markdown("#### 📥 Download (Hindi)")
    #     col3, col4 = st.columns(2)
    #     with col3:
    #         st.download_button("⬇️ Hindi TXT", data=final_text.encode(), file_name="hindi_medicine_analysis.txt")
    #     with col4:
    #         with open(hindi_img_path, "rb") as img_file:
    #             st.download_button("🖼️ Hindi Image", data=img_file, file_name="hindi_medicine_analysis.png", mime="image/png")
                

    # Clean up the temporary image files
    try:
        os.remove(orig_path)
        os.remove(dilated_path)
    except OSError:
        pass

else:
    st.markdown("<center><i>📸 Please Upload Scanned prescription image to get best result</i></center>", unsafe_allow_html=True)




