import google.generativeai as genai
import itertools
import json
import os

from dotenv import load_dotenv

load_dotenv()

# Support multiple Gemini keys (comma-separated or single key)
api_keys = os.getenv("GOOGLE_API_KEYS") or os.getenv("GOOGLE_API_KEY")
if not api_keys:
    raise ValueError("No Gemini API keys found in GOOGLE_API_KEYS or GOOGLE_API_KEY environment variable.")
api_keys = [k.strip() for k in api_keys.split(",") if k.strip()]
print(f"Loaded {len(api_keys)} Gemini API key(s)")


def query_gemini(questions, contexts, max_retries=3):
    """Answer a batch of questions against the given context passages via Gemini.

    Returns a dict of the form {"answers": [...]} with one answer per question.
    """
    context = "\n\n".join(contexts)
    questions_text = "\n".join(f"{i+1}. {q}" for i, q in enumerate(questions))
prompt = f""" | |
You are an expert insurance assistant generating formal yet user-facing answers to policy questions and Other Human Questions. Your goal is to write professional, structured answers that reflect the language of policy documents β but are still human-readable and easy to understand. | |
π§ FORMAT & TONE GUIDELINES: | |
- Write in professional third-person language (no "you", no "we"). | |
- Use clear sentence structure with proper punctuation and spacing. | |
- Do NOT write in legalese or robotic passive constructions. | |
- Include eligibility, limits, and waiting periods explicitly where relevant. | |
- Keep it factual, neutral, and easy to follow. | |
- First, try to answer each question using information from the provided context. | |
- If the question is NOT covered by the context Provide Then Give The General Answer It Not Be In Context if Nothing Found Give Normal Ai Answer for The Question Correctly | |
- Limit each answer to 2β3 sentences, and do not repeat unnecessary information. | |
- If a question can be answered with a simple "Yes", "No", "Can apply", or "Cannot apply", then begin the answer with that phrase, followed by a short supporting Statement In Natural Human Like response.So Give A Good Answer For The Question With Correct Information. | |
- Avoid giving theory Based Long Long answers Try to Give Short Good Reasonable Answers. | |
π DO NOT: | |
- Use words like "context", "document", or "text". | |
- Output markdown, bullets, emojis, or markdown code blocks. | |
- Say "helpful", "available", "allowed", "indemnified", "excluded", etc. | |
- Use overly robotic passive constructions like "shall be indemnified". | |
- Dont Give In Message Like "Based On The Context "Or "Nothing Refered In The context" Like That Dont Give In Response Try To Give Answer For The Question Alone | |
β DO: | |
- Write in clean, informative language. | |
- Give complete answers in 2β3 sentences maximum. | |
π€ OUTPUT FORMAT (strict): | |
Respond with only the following JSON β no explanations, no comments, no markdown: | |
{{ | |
"answers": [ | |
"Answer to question 1", | |
"Answer to question 2", | |
... | |
] | |
}} | |
π CONTEXT: | |
{context} | |
β QUESTIONS: | |
{questions_text} | |
Your task: For each question, provide a complete, professional, and clearly written answer in 2β3 sentences using a formal but readable tone. | |
""" | |
    last_exception = None
    total_attempts = len(api_keys) * max_retries
    key_cycle = itertools.cycle(api_keys)

    # Rotate through the configured keys, allowing up to max_retries attempts per key.
    for attempt in range(total_attempts):
        key = next(key_cycle)
        try:
            genai.configure(api_key=key)
            model = genai.GenerativeModel("gemini-2.5-flash-lite")
            response = model.generate_content(prompt)

            response_text = getattr(response, "text", "").strip()
            if not response_text:
                raise ValueError("Empty response received from Gemini API.")

            # Strip markdown code fences if the model wrapped the JSON in them.
            if response_text.startswith("```json"):
                response_text = response_text.replace("```json", "").replace("```", "").strip()
            elif response_text.startswith("```"):
                response_text = response_text.replace("```", "").strip()

            parsed = json.loads(response_text)
            if "answers" in parsed and isinstance(parsed["answers"], list):
                return parsed
            raise ValueError("Invalid response format received from Gemini.")
        except Exception as e:
            last_exception = e
            print(f"[Retry {attempt+1}/{total_attempts}] Gemini key {key[:8]}... failed: {e}")
            continue

    print(f"All Gemini API attempts failed. Last error: {last_exception}")
    return {"answers": [f"Error generating response: {str(last_exception)}"] * len(questions)}