import streamlit as st
import cv2
import numpy as np
import tempfile
import os
from langchain_community.document_loaders import UnstructuredImageLoader
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# Set Hugging Face API token (read from the "hf" secret); LangChain's
# HuggingFaceEndpoint looks for HUGGINGFACEHUB_API_TOKEN, not *_API_KEY
os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("hf")
st.set_page_config(page_title="MediAssist - Prescription Analyzer", layout="wide")
# Sidebar
st.sidebar.title("📷 Medical Chatbot")
st.sidebar.markdown("Analyze prescriptions with ease using AI")
st.sidebar.markdown("---")
# App Header
st.markdown("""
<h1 style='text-align: center; color: #4A90E2;'>🧠 Medical Chatbot</h1>
<h3 style='text-align: center;'>Prescription Analyzer using AI</h3>
<p style='text-align: center;'>Upload a doctor's prescription image, and MediAssist will extract, translate, and explain it for you.</p>
<br>
""", unsafe_allow_html=True)
# File uploader
uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
if uploaded_file:
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
        temp_file.write(uploaded_file.read())
        orig_path = temp_file.name
    # Step 1: Read and preprocess image (invert to white-on-black, then dilate
    # to thicken the text strokes before OCR)
    image = cv2.imread(orig_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(binary_inv, kernel, iterations=1)
    # Save processed image for OCR
    dilated_path = orig_path.replace(".png", "_dilated.png")
    cv2.imwrite(dilated_path, dilated)
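    # Note: UnstructuredImageLoader performs the OCR through the `unstructured`
    # package, which typically needs a local Tesseract install to read images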
    # Load with LangChain
    loader = UnstructuredImageLoader(dilated_path)
    documents = loader.load()
    extracted_text = "\n".join([doc.page_content for doc in documents])
    # Prompt template
    template = """
You are a helpful medical assistant.
Here is a prescription text extracted from an image:
{prescription_text}
Please do the following:
1. Extract only the medicine names mentioned in the prescription (ignore any other text).
2. For each medicine, provide:
   - When to take it (timing and dosage)
   - Possible side effects
   - Any special instructions
Format your answer as bullet points, listing only medicines and their details.
"""
    prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
    # Set up Hugging Face LLM
    llm_model = HuggingFaceEndpoint(
        repo_id="aaditya/Llama3-OpenBioLLM-70B",
        provider="nebius",
        temperature=0.6,
        max_new_tokens=300,
        task="conversational"
    )
    # Wrap the endpoint in a chat interface; generation settings already live on the endpoint
    model = ChatHuggingFace(llm=llm_model)
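    # Note: LLMChain and chain.run() still work but are deprecated in recent
    # LangChain releases in favor of `prompt | model` and .invoke()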
    chain = LLMChain(llm=model, prompt=prompt)
    # Display image and extracted text
    col1, col2 = st.columns([1, 2])
    with col1:
        st.image(dilated, caption="Preprocessed Prescription", channels="GRAY", use_container_width=True)
    with col2:
        st.success("✅ Prescription Uploaded & Preprocessed Successfully")
        st.markdown("### 📄 Extracted Text")
        st.code(extracted_text)
    if st.button("🔍 Analyze Text"):
        with st.spinner("Analyzing..."):
            response = chain.run(prescription_text=extracted_text)
            st.success(response)
    # Cleanup temp files
    os.remove(orig_path)
    os.remove(dilated_path)
else:
    st.markdown("<center><i>Upload a prescription image to begin analysis.</i></center>", unsafe_allow_html=True)