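"""LLM helper for answering insurance-policy questions with Google Gemini.

Loads GOOGLE_API_KEY from the environment, builds a single prompt that pairs
the retrieved context chunks with a numbered list of questions, and asks
Gemini to return the answers as a JSON object of the form {"answers": [...]}.
"""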
import google.generativeai as genai
import os
import json
import time
from dotenv import load_dotenv
load_dotenv()

api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise ValueError("GOOGLE_API_KEY environment variable is not set. Please add it to your .env file.")

print(f"Google API Key loaded: {api_key[:10]}...")
genai.configure(api_key=api_key)

def query_gemini(questions, contexts):
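    """Answer a batch of questions against the provided context chunks.

    Args:
        questions: list of question strings to answer.
        contexts: list of retrieved context chunks (plain text) used to ground the answers.

    Returns:
        dict with an "answers" list containing one answer per question; on API
        or JSON-parsing failures, each entry is an error message instead.
    """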
    start_time = time.time()
    print(f"Starting LLM processing for {len(questions)} questions with {len(contexts)} context chunks")
    
    try:
        # Time context preparation
        context_start = time.time()
        context = "\n\n".join(contexts)
        context_time = time.time() - context_start
        print(f"Context preparation took: {context_time:.2f} seconds")
        print(f"Total context length: {len(context)} characters")
        
        # Time prompt preparation
        prompt_start = time.time()
        # Create a numbered list of questions
        questions_text = "\n".join([f"{i+1}. {q}" for i, q in enumerate(questions)])
        
        prompt = f"""
You are an intelligent insurance assistant trained to answer questions using insurance documents. Based on the context provided below, respond to each question with a **well-informed, complete, and professionally worded answer**.

🎯 SCORING & OUTPUT GOAL:
- Responses are part of an evaluated system.
- Each answer should be **accurate**, **complete**, and **well-phrased**, ideally around **1–2 full sentences**.
- Avoid short/fragmented answers or long multi-paragraph explanations.
- Always write like an insurance advisor addressing a customer clearly.

📘 INSTRUCTIONS:
1. **Only use the provided context** to answer each question. If the answer is not found, respond with exactly: `"Not Found"`.
2. Keep answers concise **but not vague**. Include all **key points** (such as limits, durations, conditions) in one or two complete sentences.
3. DO NOT use bullet points, partial phrases, or excessive legal text. DO NOT repeat the question in the answer.
4. Match the tone and format of these examples:
   - "A grace period of thirty days is provided for premium payment after the due date to renew or continue the policy without losing continuity benefits."
   - "Yes, the policy covers maternity expenses, including childbirth and lawful medical termination of pregnancy. To be eligible, the female insured person must have been continuously covered for at least 24 months. The benefit is limited to two deliveries or terminations during the policy period."
   - "Yes, the policy indemnifies the medical expenses for the organ donor's hospitalization for the purpose of harvesting the organ, provided the organ is for an insured person and the donation complies with the Transplantation of Human Organs Act, 1994."
   - "Not Found"

📤 RETURN FORMAT:
Respond strictly using this JSON structure:

{{
  "answers": [
    "Answer to question 1",
    "Answer to question 2",
    ...
  ]
}}

📚 CONTEXT:
{context}

❓ QUESTIONS:
{questions_text}

Your task: Provide accurate, refined answers based on the document context above. Use the tone and structure shown. Be concise but thorough. Only include what is supported in the context. Use "Not Found" if the answer is missing.
"""




        prompt_time = time.time() - prompt_start
        print(f"Prompt preparation took: {prompt_time:.2f} seconds")
        print(f"Total prompt length: {len(prompt)} characters")
        
        # Time model initialization and API call
        api_start = time.time()
        model = genai.GenerativeModel('gemini-2.0-flash-exp')
        response = model.generate_content(prompt)
        api_time = time.time() - api_start
        print(f"Gemini API call took: {api_time:.2f} seconds")
        
        # Time response processing
        process_start = time.time()
        response_text = response.text.strip()
        print(f"Raw response length: {len(response_text)} characters")
        
        # Try to parse the response as JSON
        try:
            # Remove any markdown code blocks if present
            if response_text.startswith("```json"):
                response_text = response_text.replace("```json", "").replace("```", "").strip()
            elif response_text.startswith("```"):
                response_text = response_text.replace("```", "").strip()
            
            parsed_response = json.loads(response_text)
            process_time = time.time() - process_start
            print(f"Response processing took: {process_time:.2f} seconds")
            
            total_time = time.time() - start_time
            print(f"Total LLM processing took: {total_time:.2f} seconds")
            
            return parsed_response
        except json.JSONDecodeError:
            # If JSON parsing fails, return a structured response
            process_time = time.time() - process_start
            print(f"Response processing took: {process_time:.2f} seconds (JSON parsing failed)")
            print(f"Failed to parse JSON response: {response_text}")
            
            total_time = time.time() - start_time
            print(f"Total LLM processing took: {total_time:.2f} seconds")
            
            return {"answers": ["Error parsing response"] * len(questions)}
            
    except Exception as e:
        total_time = time.time() - start_time
        print(f"Error in query_gemini after {total_time:.2f} seconds: {str(e)}")
        return {"answers": [f"Error generating response: {str(e)}"] * len(questions)}
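

# Minimal usage sketch: the sample question and context below are illustrative
# placeholders rather than text from a real policy document, and the call only
# succeeds if GOOGLE_API_KEY is available via the environment or a .env file.
if __name__ == "__main__":
    sample_questions = ["What is the grace period for premium payment?"]
    sample_contexts = [
        "A grace period of thirty days is provided for premium payment "
        "after the due date to renew or continue the policy."
    ]
    result = query_gemini(sample_questions, sample_contexts)
    print(json.dumps(result, indent=2))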