hponepyae committed on
Commit
50aaa9b
·
verified ·
1 Parent(s): c5882f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -16
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
5
  import os
6
  import spaces
7
 
8
- # --- Initialize the Model Pipeline (As per your working example) ---
9
  print("Loading MedGemma model...")
10
  try:
11
  pipe = pipeline(
@@ -21,12 +21,11 @@ except Exception as e:
21
  model_loaded = False
22
  print(f"Error loading model: {e}")
23
 
24
- # --- Core Analysis Function (Using the logic from your working example) ---
25
  @spaces.GPU()
26
  def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
27
  """
28
- Analyzes user's symptoms using the definitive calling convention demonstrated
29
- in the working X-ray analyzer example.
30
  """
31
  if not model_loaded:
32
  return "Error: The AI model could not be loaded. Please check the Space logs."
@@ -36,7 +35,6 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
36
  return "Please describe your symptoms or upload an image for analysis."
37
 
38
  try:
39
- # --- DEFINITIVE MESSAGE CONSTRUCTION (from your example) ---
40
  system_prompt = (
41
  "You are an expert, empathetic AI medical assistant. Analyze the potential "
42
  "medical condition based on the following information. Provide a list of "
@@ -45,10 +43,8 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
45
  )
46
 
47
  user_content = []
48
- # The user's prompt text is always present.
49
  user_content.append({"type": "text", "text": symptoms_text})
50
 
51
- # The actual PIL image object is added to the content list if it exists.
52
  if symptom_image:
53
  user_content.append({"type": "image", "image": symptom_image})
54
 
@@ -57,17 +53,17 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
57
  {"role": "user", "content": user_content}
58
  ]
59
 
 
60
  generation_args = {
61
- "max_new_tokens": 512,
62
  "do_sample": True,
63
  "temperature": 0.7,
64
  }
65
 
66
- # --- DEFINITIVE PIPELINE CALL (from your example) ---
67
  # The entire messages structure is passed to the `text` argument.
68
  output = pipe(text=messages, **generation_args)
69
 
70
- # The result is the 'content' of the last generated message.
71
  result = output[0]["generated_text"][-1]["content"]
72
 
73
  disclaimer = "\n\n***Disclaimer: I am an AI assistant and not a medical professional. This is not a diagnosis. Please consult a doctor for any health concerns.***"
@@ -78,7 +74,7 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
78
  print(f"An exception occurred during analysis: {type(e).__name__}: {e}")
79
  return f"An error occurred during analysis. Please check the logs for details: {str(e)}"
80
 
81
- # --- Gradio Interface (Your original, no changes needed) ---
82
  with gr.Blocks(theme=gr.themes.Soft(), title="AI Symptom Analyzer") as demo:
83
  gr.HTML("""
84
  <div style="text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 2rem; border-radius: 10px; margin-bottom: 2rem;">
@@ -109,11 +105,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Symptom Analyzer") as demo:
109
  output_text = gr.Textbox(
110
  label="AI Analysis", lines=25, show_copy_button=True, placeholder="Analysis results will appear here...")
111
 
112
- def clear_all():
113
- # This function should return values for all outputs cleared by the button
114
- return None, ""
115
-
116
- # The clear button now correctly clears the image and text input.
117
  analyze_btn.click(fn=analyze_symptoms, inputs=[image_input, symptoms_input], outputs=output_text)
118
  clear_btn.click(fn=lambda: (None, "", ""), outputs=[image_input, symptoms_input, output_text])
119
 
 
5
  import os
6
  import spaces
7
 
8
+ # --- Initialize the Model Pipeline ---
9
  print("Loading MedGemma model...")
10
  try:
11
  pipe = pipeline(
 
21
  model_loaded = False
22
  print(f"Error loading model: {e}")
23
 
24
+ # --- Core Analysis Function ---
25
  @spaces.GPU()
26
  def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
27
  """
28
+ Analyzes user's symptoms using the definitive calling convention.
 
29
  """
30
  if not model_loaded:
31
  return "Error: The AI model could not be loaded. Please check the Space logs."
 
35
  return "Please describe your symptoms or upload an image for analysis."
36
 
37
  try:
 
38
  system_prompt = (
39
  "You are an expert, empathetic AI medical assistant. Analyze the potential "
40
  "medical condition based on the following information. Provide a list of "
 
43
  )
44
 
45
  user_content = []
 
46
  user_content.append({"type": "text", "text": symptoms_text})
47
 
 
48
  if symptom_image:
49
  user_content.append({"type": "image", "image": symptom_image})
50
 
 
53
  {"role": "user", "content": user_content}
54
  ]
55
 
56
+ # *** THE FIX: Increased the token limit to prevent truncated output ***
57
  generation_args = {
58
+ "max_new_tokens": 1024, # Increased from 512 to 1024
59
  "do_sample": True,
60
  "temperature": 0.7,
61
  }
62
 
 
63
  # The entire messages structure is passed to the `text` argument.
64
  output = pipe(text=messages, **generation_args)
65
 
66
+ # Extract the content of the last generated message.
67
  result = output[0]["generated_text"][-1]["content"]
68
 
69
  disclaimer = "\n\n***Disclaimer: I am an AI assistant and not a medical professional. This is not a diagnosis. Please consult a doctor for any health concerns.***"
 
74
  print(f"An exception occurred during analysis: {type(e).__name__}: {e}")
75
  return f"An error occurred during analysis. Please check the logs for details: {str(e)}"
76
 
77
+ # --- Gradio Interface (No changes needed) ---
78
  with gr.Blocks(theme=gr.themes.Soft(), title="AI Symptom Analyzer") as demo:
79
  gr.HTML("""
80
  <div style="text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 2rem; border-radius: 10px; margin-bottom: 2rem;">
 
105
  output_text = gr.Textbox(
106
  label="AI Analysis", lines=25, show_copy_button=True, placeholder="Analysis results will appear here...")
107
 
108
+ # Event handlers
 
 
 
 
109
  analyze_btn.click(fn=analyze_symptoms, inputs=[image_input, symptoms_input], outputs=output_text)
110
  clear_btn.click(fn=lambda: (None, "", ""), outputs=[image_input, symptoms_input, output_text])
111