Update app.py
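This commit rewrites analyze_symptoms around the chat-style calling convention for multimodal Hugging Face pipelines: the user's text and an image placeholder go into a structured messages list, the actual PIL image is passed to the pipeline separately, and all generation parameters are nested in a generate_kwargs dictionary.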
app.py CHANGED
@@ -25,8 +25,8 @@ except Exception as e:
 @spaces.GPU()
 def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
     """
-    Analyzes user's symptoms using the
+    Analyzes user's symptoms using the definitive correct calling convention for
+    multimodal chat models in Hugging Face pipelines.
     """
     if not model_loaded:
         return "Error: The AI model could not be loaded. Please check the Space logs."
@@ -36,7 +36,7 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
         return "Please describe your symptoms or upload an image for analysis."

     try:
-        # --- CHAT-BASED PROMPT LOGIC
+        # --- DEFINITIVE CHAT-BASED PROMPT LOGIC ---
         system_instruction = (
             "You are an expert, empathetic AI medical assistant. "
             "Analyze the potential medical condition based on the user's input. "
@@ -45,42 +45,53 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
             "the user provided."
         )

+        # The user's content is a list of dictionaries describing the multimodal input.
         user_content = []
+
+        # The text part of the prompt.
+        user_content.append({"type": "text", "text": symptoms_text})

+        # The images to be passed to the pipeline.
+        images_to_pass = []
         if symptom_image:
+            # 1. Add a placeholder to the content list.
+            user_content.append({"type": "image"})
+            # 2. Add the actual PIL image object to a separate list.
+            images_to_pass.append(symptom_image)

+        # Construct the final messages list for the chat template.
         messages = [
             {"role": "system", "content": system_instruction},
             {"role": "user", "content": user_content},
         ]

-        print("Generating pipeline output with
+        print("Generating pipeline output with separate image argument...")

-        # All text-generation parameters must be nested within a 'generate_kwargs' dictionary.
+        # Generation parameters must be in a `generate_kwargs` dictionary.
         generate_kwargs = {
             "max_new_tokens": 512,
             "do_sample": True,
             "temperature": 0.7,
         }
+
+        # --- THE DEFINITIVE PIPELINE CALL ---
+        # The pipeline call requires three distinct things:
+        # 1. The `messages` list describing the chat structure.
+        # 2. The `images` list containing the actual PIL image data.
+        # 3. The `generate_kwargs` dictionary for generation parameters.
+        output = pipe(
+            messages,
+            images=images_to_pass,
+            generate_kwargs=generate_kwargs
+        )

         print("Pipeline Output:", output)

-        # --- OUTPUT PROCESSING
-            result = assistant_message['content']
-        else:
-            result = str(assistant_message)
+        # --- SIMPLIFIED OUTPUT PROCESSING ---
+        # For this pipeline, the output is a list with one dictionary.
+        # The 'generated_text' key contains the AI's response string directly.
+        if output and isinstance(output, list) and 'generated_text' in output[0]:
+            result = output[0]['generated_text']
         else:
             result = "The model did not return a valid response. Please try again."
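For reference, here is a minimal, self-contained sketch of the calling convention this commit adopts. The model id, the image file name, and the example symptom text are placeholder assumptions (the Space's pipeline setup is outside this diff), and whether the pipeline accepts images= and generate_kwargs= exactly this way depends on the installed transformers version; treat this as an illustration of the commit's convention, not a verified API reference.

from PIL import Image
from transformers import pipeline

# Hypothetical model id; the Space's actual model is defined outside this diff.
pipe = pipeline("image-text-to-text", model="some-org/some-multimodal-chat-model")

image = Image.open("rash_photo.jpg")  # hypothetical example input

messages = [
    {"role": "system", "content": "You are an expert, empathetic AI medical assistant."},
    {"role": "user", "content": [
        {"type": "text", "text": "I have an itchy rash on my arm."},
        {"type": "image"},  # placeholder entry; the pixel data travels separately
    ]},
]

# Mirrors the commit: messages describe the chat structure, images carries the
# actual PIL data, and generation parameters are nested under generate_kwargs.
output = pipe(
    messages,
    images=[image],
    generate_kwargs={"max_new_tokens": 512, "do_sample": True, "temperature": 0.7},
)

# Per the commit's output handling, the reply is expected at
# output[0]["generated_text"].
print(output[0]["generated_text"])

If the pipeline instead returns the reply as a list of chat turns rather than a plain string, the last turn's 'content' field would hold the assistant text, which appears to be what the removed assistant_message handling in the old code did.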
|