Update app.py
app.py CHANGED
@@ -25,8 +25,8 @@ except Exception as e:
 @spaces.GPU()
 def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
     """
-    Analyzes user's symptoms using …
-    …
+    Analyzes user's symptoms using a single, formatted prompt string, which is the
+    most reliable method for this pipeline.
     """
     if not model_loaded:
         return "Error: The AI model could not be loaded. Please check the Space logs."
@@ -36,38 +36,35 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
         return "Please describe your symptoms or upload an image for analysis."
 
     try:
-        # --- DEFINITIVE …
+        # --- DEFINITIVE PROMPT STRING LOGIC ---
+        # We construct a single string with special tokens that the model understands.
+        # This is more robust than the 'messages' format for this pipeline.
+
         system_instruction = (
             "You are an expert, empathetic AI medical assistant. "
-            "Analyze the potential medical condition based on the …
+            "Analyze the potential medical condition based on the following information. "
             "Provide a list of possible conditions, your reasoning, and a clear, "
-            "actionable next-steps plan. …
-            "the user provided."
+            "actionable next-steps plan."
         )
 
-
-        user_content = []
+        prompt_parts = ["<start_of_turn>user"]
 
-        # …
+        # Combine the user's text and the system instruction.
+        full_user_prompt = f"{symptoms_text}\n\n{system_instruction}"
+        prompt_parts.append(full_user_prompt)
 
-        # The …
-        images_to_pass = []
+        # The <image> token is a placeholder that tells the model where to "look" at the image.
         if symptom_image:
-            …
-        messages = [
-            {"role": "system", "content": system_instruction},
-            {"role": "user", "content": user_content},
-        ]
+            prompt_parts.append("<image>")
+
+        prompt_parts.append("<start_of_turn>model")
+
+        # Join all parts into the final prompt string.
+        prompt = "\n".join(prompt_parts)
 
-        print("Generating pipeline output with …
+        print(f"Generating pipeline output with prompt:\n{prompt}")
 
-        # Generation parameters must be in a …
+        # Generation parameters must be in a 'generate_kwargs' dictionary.
         generate_kwargs = {
             "max_new_tokens": 512,
             "do_sample": True,
@@ -75,34 +72,48 @@ def analyze_symptoms(symptom_image: Image.Image, symptoms_text: str):
         }
 
         # --- THE DEFINITIVE PIPELINE CALL ---
-        # …
-        # …
-        …
+        # We pass the prompt string and, if available, the image list.
+        # This calling signature correctly provides the data to the preprocessor.
+
+        if symptom_image:
+            # Provide both the image data and the text prompt.
+            output = pipe(
+                images=[symptom_image],
+                prompt=prompt,
+                generate_kwargs=generate_kwargs
+            )
+        else:
+            # Provide only the text prompt.
+            output = pipe(
+                prompt=prompt,
+                generate_kwargs=generate_kwargs
+            )
 
         print("Pipeline Output:", output)
 
-        # --- …
-        # …
-        # …
+        # --- CORRECTED OUTPUT PROCESSING ---
+        # The output contains the full prompt plus the response. We need to split
+        # the response from the prompt.
         if output and isinstance(output, list) and 'generated_text' in output[0]:
-            …
+            full_text = output[0]['generated_text']
+            # The model's response comes after the final '<start_of_turn>model' token.
+            parts = full_text.split("<start_of_turn>model\n")
+            if len(parts) > 1:
+                result = parts[1]
+            else:
+                result = full_text  # Fallback in case of unexpected format
         else:
             result = "The model did not return a valid response. Please try again."
 
         disclaimer = "\n\n***Disclaimer: I am an AI assistant and not a medical professional. This is not a diagnosis. Please consult a doctor for any health concerns.***"
 
-        return result + disclaimer
+        return result.strip() + disclaimer
 
     except Exception as e:
         print(f"An exception occurred during analysis: {type(e).__name__}: {e}")
         return f"An error occurred during analysis. Please check the logs for details: {str(e)}"
 
+
 # --- Gradio Interface (No changes needed) ---
 with gr.Blocks(theme=gr.themes.Soft(), title="AI Symptom Analyzer") as demo:
     gr.HTML("""

(Lines marked … are deleted lines whose text the diff viewer truncated; their original content is not recoverable.)
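The new prompt format can be previewed without loading the model. Below is a minimal standalone sketch that replays the string-building logic from the diff on invented example input; the <start_of_turn> markers follow the Gemma-family chat format, which is presumably what the underlying model expects.

# Standalone sketch of the prompt the updated code builds.
# The symptom text here is invented for illustration.
symptoms_text = "Persistent dry cough and mild fever for three days."
system_instruction = (
    "You are an expert, empathetic AI medical assistant. "
    "Analyze the potential medical condition based on the following information. "
    "Provide a list of possible conditions, your reasoning, and a clear, "
    "actionable next-steps plan."
)

prompt_parts = ["<start_of_turn>user"]
prompt_parts.append(f"{symptoms_text}\n\n{system_instruction}")
prompt_parts.append("<image>")            # appended only when an image was uploaded
prompt_parts.append("<start_of_turn>model")
prompt = "\n".join(prompt_parts)
print(prompt)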
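The diff only shows the call site of pipe; its construction happens earlier in app.py and is not part of this change. Here is a hedged sketch of what that setup might look like with the standard transformers pipeline API; the task name and model id below are assumptions, not taken from the diff.

# Hypothetical setup sketch; the real app.py defines pipe and model_loaded elsewhere.
from transformers import pipeline

model_loaded = False
try:
    pipe = pipeline(
        task="image-to-text",            # assumed task; the diff shows only the call site
        model="google/medgemma-4b-it",   # placeholder model id, not taken from the diff
    )
    model_loaded = True
except Exception as e:
    print(f"Model failed to load: {type(e).__name__}: {e}")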
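The new output post-processing is easy to verify in isolation, since the pipeline echoes the prompt ahead of the reply. A small sketch of that logic as a testable helper (the helper name and sample strings are invented):

def extract_model_reply(full_text: str) -> str:
    # Mirrors the diff's splitting logic: keep only the text that follows the
    # '<start_of_turn>model' marker terminating the echoed prompt.
    parts = full_text.split("<start_of_turn>model\n")
    return parts[1] if len(parts) > 1 else full_text

echoed = (
    "<start_of_turn>user\nI have a rash on my arm.\n"
    "<start_of_turn>model\nPossible causes include contact dermatitis..."
)
assert extract_model_reply(echoed) == "Possible causes include contact dermatitis..."

One design note: parts[1] keeps everything after the first marker, while the comment in the diff says "final" token; parts[-1] would match that wording if the marker could ever occur more than once in the prompt.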