hponepyae committed (verified)
Commit 2588693 · Parent(s): e8467c6

Update app.py

Files changed (1):
  1. app.py +49 -38
app.py CHANGED
@@ -25,8 +25,8 @@ except Exception as e:
 @spaces.GPU()
 def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
     """
-    Analyzes user's symptoms using the definitive correct calling convention for
-    multimodal chat models in Hugging Face pipelines.
+    Analyzes user's symptoms using a single, formatted prompt string, which is the
+    most reliable method for this pipeline.
     """
     if not model_loaded:
         return "Error: The AI model could not be loaded. Please check the Space logs."
@@ -36,38 +36,35 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
         return "Please describe your symptoms or upload an image for analysis."
 
     try:
-        # --- DEFINITIVE CHAT-BASED PROMPT LOGIC ---
+        # --- DEFINITIVE PROMPT STRING LOGIC ---
+        # We construct a single string with special tokens that the model understands.
+        # This is more robust than the 'messages' format for this pipeline.
+
         system_instruction = (
             "You are an expert, empathetic AI medical assistant. "
-            "Analyze the potential medical condition based on the user's input. "
+            "Analyze the potential medical condition based on the following information. "
             "Provide a list of possible conditions, your reasoning, and a clear, "
-            "actionable next-steps plan. Begin your analysis by describing the information "
-            "the user provided."
+            "actionable next-steps plan."
         )
 
-        # The user's content is a list of dictionaries describing the multimodal input.
-        user_content = []
-
-        # The text part of the prompt.
-        user_content.append({"type": "text", "text": symptoms_text})
-
-        # The images to be passed to the pipeline.
-        images_to_pass = []
+        prompt_parts = ["<start_of_turn>user"]
+
+        # Combine the user's text and the system instruction.
+        full_user_prompt = f"{symptoms_text}\n\n{system_instruction}"
+        prompt_parts.append(full_user_prompt)
+
+        # The <image> token is a placeholder that tells the model where to "look" at the image.
         if symptom_image:
-            # 1. Add a placeholder to the content list.
-            user_content.append({"type": "image"})
-            # 2. Add the actual PIL image object to a separate list.
-            images_to_pass.append(symptom_image)
-
-        # Construct the final messages list for the chat template.
-        messages = [
-            {"role": "system", "content": system_instruction},
-            {"role": "user", "content": user_content},
-        ]
+            prompt_parts.append("<image>")
+
+        prompt_parts.append("<start_of_turn>model")
+
+        # Join all parts into the final prompt string.
+        prompt = "\n".join(prompt_parts)
 
-        print("Generating pipeline output with separate image argument...")
+        print(f"Generating pipeline output with prompt:\n{prompt}")
 
-        # Generation parameters must be in a `generate_kwargs` dictionary.
+        # Generation parameters must be in a 'generate_kwargs' dictionary.
         generate_kwargs = {
             "max_new_tokens": 512,
             "do_sample": True,
@@ -75,34 +72,48 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
         }
 
         # --- THE DEFINITIVE PIPELINE CALL ---
-        # The pipeline call requires three distinct things:
-        # 1. The `messages` list describing the chat structure.
-        # 2. The `images` list containing the actual PIL image data.
-        # 3. The `generate_kwargs` dictionary for generation parameters.
-        output = pipe(
-            messages,
-            images=images_to_pass,
-            generate_kwargs=generate_kwargs
-        )
+        # We pass the prompt string and, if available, the image list.
+        # This calling signature correctly provides the data to the preprocessor.
+
+        if symptom_image:
+            # Provide both the image data and the text prompt.
+            output = pipe(
+                images=[symptom_image],
+                prompt=prompt,
+                generate_kwargs=generate_kwargs
+            )
+        else:
+            # Provide only the text prompt.
+            output = pipe(
+                prompt=prompt,
+                generate_kwargs=generate_kwargs
+            )
 
         print("Pipeline Output:", output)
 
-        # --- SIMPLIFIED OUTPUT PROCESSING ---
-        # For this pipeline, the output is a list with one dictionary.
-        # The 'generated_text' key contains the AI's response string directly.
+        # --- CORRECTED OUTPUT PROCESSING ---
+        # The output contains the full prompt plus the response. We need to split
+        # the response from the prompt.
         if output and isinstance(output, list) and 'generated_text' in output[0]:
-            result = output[0]['generated_text']
+            full_text = output[0]['generated_text']
+            # The model's response comes after the final '<start_of_turn>model' token.
+            parts = full_text.split("<start_of_turn>model\n")
+            if len(parts) > 1:
+                result = parts[1]
+            else:
+                result = full_text  # Fallback in case of unexpected format
         else:
             result = "The model did not return a valid response. Please try again."
 
         disclaimer = "\n\n***Disclaimer: I am an AI assistant and not a medical professional. This is not a diagnosis. Please consult a doctor for any health concerns.***"
 
-        return result + disclaimer
+        return result.strip() + disclaimer
 
     except Exception as e:
         print(f"An exception occurred during analysis: {type(e).__name__}: {e}")
         return f"An error occurred during analysis. Please check the logs for details: {str(e)}"
 
+
 # --- Gradio Interface (No changes needed) ---
 with gr.Blocks(theme=gr.themes.Soft(), title="AI Symptom Analyzer") as demo:
     gr.HTML("""