GoldsWolf committed
Commit b4defcb · 1 Parent(s): 9a880aa

changes to different files

Files changed (7)
  1. agent.py +95 -277
  2. app.py +7 -1
  3. metadata.jsonl +0 -0
  4. requirements.txt +11 -7
  5. supabase_docs.csv +0 -0
  6. system_prompt.txt +5 -10
  7. test.ipynb +684 -0
agent.py CHANGED
@@ -1,306 +1,109 @@
 
1
  import os
2
- from langchain.tools import tool
3
- from typing import Union, List
4
- from decimal import Decimal, getcontext
5
  from dotenv import load_dotenv
6
- from langchain_community.utilities import WikipediaAPIWrapper
7
- import warnings
8
- import wikipedia
9
- from bs4 import BeautifulSoup
10
- from langchain_community.tools import DuckDuckGoSearchRun
11
- import requests
12
- from typing import Optional
13
- import re
14
- from langchain_community.document_loaders import ArxivLoader
15
- from langchain_google_genai import ChatGoogleGenerativeAI
16
- from langchain_groq import ChatGroq
17
- from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
18
- from langchain_core.messages import SystemMessage, HumanMessage
19
  from langgraph.graph import START, StateGraph, MessagesState
20
  from langgraph.prebuilt import tools_condition
21
  from langgraph.prebuilt import ToolNode
22
  from langchain_community.vectorstores import SupabaseVectorStore
23
- from supabase.client import Client, create_client
 
24
  from langchain.tools.retriever import create_retriever_tool
 
25
 
26
- # Initialize search tool
27
- search_tool = DuckDuckGoSearchRun()
28
- getcontext().prec = 10
29
- # Initial configuration
30
- load_dotenv() # Load environment variables
31
-
32
- # Fix for parser warning
33
- wikipedia.wikipedia._BeautifulSoup = lambda html: BeautifulSoup(html, 'html.parser')
34
- warnings.filterwarnings("ignore", category=UserWarning, module="wikipedia")
35
-
36
- def configure_wikipedia(language: str = 'en', top_k_results: int = 3, max_chars: int = 4000):
37
- """Configure Wikipedia search settings
38
-
39
- Args:
40
- language (str): Search language (default 'en')
41
- top_k_results (int): Number of results to return
42
- max_chars (int): Maximum character limit per result
43
-
44
- Returns:
45
- WikipediaAPIWrapper: Configured WikipediaAPIWrapper instance
46
- """
47
- wikipedia.set_lang(language)
48
- return WikipediaAPIWrapper(
49
- wiki_client=wikipedia,
50
- top_k_results=top_k_results,
51
- doc_content_chars_max=max_chars
52
- )
53
-
54
- def format_search_result(raw_result: str) -> str:
55
- """Format Wikipedia search results for better readability
56
-
57
- Args:
58
- raw_result (str): Raw output from WikipediaAPIWrapper
59
-
60
- Returns:
61
- str: Formatted search result
62
- """
63
- if "Page: " in raw_result and "Summary: " in raw_result:
64
- parts = raw_result.split("Summary: ")
65
- page_part = parts[0].replace("Page: ", "").strip()
66
- summary_part = parts[1].strip()
67
- return f"📚 Page: {page_part}\n\n📝 Summary: {summary_part}"
68
- return raw_result
69
-
70
- def search_wikipedia(query: str, language: str = 'en') -> str:
71
- """Perform Wikipedia searches with error handling
72
-
73
- Args:
74
- query (str): Search term
75
- language (str): Search language (optional)
76
-
77
- Returns:
78
- str: Formatted result or error message
79
- """
80
- try:
81
- wikipedia_tool = configure_wikipedia(language=language)
82
- result = wikipedia_tool.run(query)
83
- return format_search_result(result)
84
- except Exception as e:
85
- return f"Search error: {str(e)}"
86
 
87
  @tool
88
- def add(a: Union[int, float], b: Union[int, float]) -> Union[int, float]:
89
- """Add two numbers together.
90
-
91
  Args:
92
- a (Union[int, float]): First number
93
- b (Union[int, float]): Second number
94
-
95
- Returns:
96
- Union[int, float]: Sum of a and b
97
  """
98
- return float(Decimal(str(a)) + Decimal(str(b)))
99
 
100
  @tool
101
- def subtract(a: Union[int, float], b: Union[int, float]) -> Union[int, float]:
102
- """Subtract b from a.
103
 
104
  Args:
105
- a (Union[int, float]): Minuend
106
- b (Union[int, float]): Subtrahend
107
-
108
- Returns:
109
- Union[int, float]: Difference between a and b
110
  """
111
- return float(Decimal(str(a)) - Decimal(str(b)))
112
 
113
  @tool
114
- def multiply(a: Union[int, float], b: Union[int, float]) -> Union[int, float]:
115
- """Multiply two numbers.
116
 
117
  Args:
118
- a (Union[int, float]): First factor
119
- b (Union[int, float]): Second factor
120
-
121
- Returns:
122
- Union[int, float]: Product of a and b
123
  """
124
- return float(Decimal(str(a)) * Decimal(str(b)))
125
 
126
  @tool
127
- def divide(a: Union[int, float], b: Union[int, float]) -> float:
128
- """Divide a by b.
129
 
130
  Args:
131
- a (Union[int, float]): Dividend
132
- b (Union[int, float]): Divisor
133
-
134
- Returns:
135
- float: Quotient of a divided by b
136
-
137
- Raises:
138
- ValueError: If b is zero
139
  """
140
  if b == 0:
141
- raise ValueError("Cannot divide by zero")
142
- return float(Decimal(str(a)) / Decimal(str(b)))
143
 
144
  @tool
145
  def modulus(a: int, b: int) -> int:
146
- """Find the remainder of a divided by b.
147
 
148
  Args:
149
- a (int): Dividend
150
- b (int): Divisor
151
-
152
- Returns:
153
- int: Remainder of a divided by b
154
-
155
- Raises:
156
- ValueError: If b is zero
157
  """
158
- if b == 0:
159
- raise ValueError("Cannot divide by zero for modulus")
160
  return a % b
161
 
162
  @tool
163
- def power(base: Union[int, float], exponent: Union[int, float]) -> Union[int, float]:
164
- """Raise base to the power of exponent.
165
-
166
- Args:
167
- base (Union[int, float]): The base number
168
- exponent (Union[int, float]): The exponent
169
-
170
- Returns:
171
- Union[int, float]: Result of base^exponent
172
- """
173
- return float(Decimal(str(base)) ** Decimal(str(exponent)))
174
-
175
- @tool
176
- def square_root(x: Union[int, float]) -> float:
177
- """Calculate the square root of a number.
178
-
179
- Args:
180
- x (Union[int, float]): Number to find the square root of
181
-
182
- Returns:
183
- float: Square root of x
184
-
185
- Raises:
186
- ValueError: If x is negative
187
- """
188
- if x < 0:
189
- raise ValueError("Cannot calculate square root of negative number")
190
- return float(Decimal(str(x)).sqrt())
191
-
192
- @tool
193
- def average(numbers: List[Union[int, float]]) -> float:
194
- """Calculate the average of a list of numbers.
195
 
196
  Args:
197
- numbers (List[Union[int, float]]): List of numbers
198
-
199
- Returns:
200
- float: Average of the numbers
201
-
202
- Raises:
203
- ValueError: If list is empty
204
- """
205
- if not numbers:
206
- raise ValueError("Cannot calculate average of empty list")
207
- return float(sum(Decimal(str(n)) for n in numbers) / Decimal(len(numbers)))
208
-
209
- @tool
210
- def percentage(value: Union[int, float], percent: Union[int, float]) -> float:
211
- """Calculate percentage of a value.
212
-
213
- Args:
214
- value (Union[int, float]): Base value
215
- percent (Union[int, float]): Percentage to calculate
216
-
217
- Returns:
218
- float: Result of value * (percent/100)
219
- """
220
- return float(Decimal(str(value)) * (Decimal(str(percent)) / Decimal(100)))
221
-
222
- @tool
223
- def web_search(query: str, site: Optional[str] = None, max_results: int = 5) -> str:
224
- """Perform internet searches. Can search the entire web or specific websites.
225
-
226
- Args:
227
- query (str): Search terms
228
- site (Optional[str]): Specific website to search (e.g., 'wikipedia.org')
229
- max_results (int): Maximum number of results to return
230
-
231
- Returns:
232
- str: Formatted search results
233
- """
234
- try:
235
- if site and not re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', site.split('/')[0]):
236
- return "Error: Invalid website format. Use 'domain.ext'"
237
-
238
- search_query = f"{query} site:{site}" if site else query
239
- results = search_tool.run(search_query)
240
-
241
- formatted = []
242
- for i, result in enumerate(results.split('\n\n')[:max_results]):
243
- if result.strip():
244
- formatted.append(f"{i+1}. {result.strip()}")
245
-
246
- header = f"Results from {site}" if site else "Search results"
247
- return f"{header}:\n\n" + '\n\n'.join(formatted) if formatted else "No results found"
248
-
249
- except Exception as e:
250
- return f"Search error: {str(e)}"
251
 
252
  @tool
253
- def scrape_page(url: str, search_term: Optional[str] = None, max_length: int = 3000) -> str:
254
- """Extract content from a specific webpage, optionally filtering by search term.
255
 
256
  Args:
257
- url (str): Full URL of the page to scrape
258
- search_term (Optional[str]): Term to search within the page content
259
- max_length (int): Maximum character length of returned content
260
-
261
- Returns:
262
- str: Relevant page content with source attribution
263
- """
264
- try:
265
- if not re.match(r'^https?://[^\s/$.?#].[^\s]*$', url):
266
- return "Error: Invalid URL format"
267
-
268
- headers = {
269
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
270
- }
271
- response = requests.get(url, headers=headers, timeout=15)
272
- response.raise_for_status()
273
-
274
- soup = BeautifulSoup(response.text, 'html.parser')
275
-
276
- for element in soup(['script', 'style', 'nav', 'footer', 'iframe', 'img']):
277
- element.decompose()
278
-
279
- text = '\n'.join(line.strip() for line in soup.get_text().split('\n') if line.strip())
280
-
281
- if search_term:
282
- lines = [line for line in text.split('\n') if search_term.lower() in line.lower()]
283
- text = '\n'.join(lines[:15])
284
-
285
- text = text[:max_length] + ('...' if len(text) > max_length else '')
286
-
287
- return f"Content from {url}:\n\n{text}"
288
-
289
- except requests.exceptions.RequestException as e:
290
- return f"Network error: {str(e)}"
291
- except Exception as e:
292
- return f"Scraping error: {str(e)}"
293
 
294
  @tool
295
  def arvix_search(query: str) -> str:
296
- """Search Arxiv for a query and return maximum 3 results.
297
 
298
  Args:
299
- query (str): The search query
300
-
301
- Returns:
302
- str: Formatted search results
303
- """
304
  search_docs = ArxivLoader(query=query, load_max_docs=3).load()
305
  formatted_search_docs = "\n\n---\n\n".join(
306
  [
@@ -309,22 +112,25 @@ def arvix_search(query: str) -> str:
309
  ])
310
  return {"arvix_results": formatted_search_docs}
311
 
312
- # Load system prompt
313
- with open("prompt.txt", "r", encoding="utf-8") as f:
 
 
314
  system_prompt = f.read()
315
 
 
316
  sys_msg = SystemMessage(content=system_prompt)
317
 
318
- # Build retriever
319
- embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
320
  supabase: Client = create_client(
321
  os.environ.get("SUPABASE_URL"),
322
  os.environ.get("SUPABASE_SERVICE_KEY"))
323
  vector_store = SupabaseVectorStore(
324
  client=supabase,
325
- embedding=embeddings,
326
  table_name="documents",
327
- query_name="match_documents",
328
  )
329
  create_retriever_tool = create_retriever_tool(
330
  retriever=vector_store.as_retriever(),
@@ -332,28 +138,31 @@ create_retriever_tool = create_retriever_tool(
332
  description="A tool to retrieve similar questions from a vector store.",
333
  )
334
 
 
 
335
  tools = [
336
- arvix_search,
337
- scrape_page,
338
- web_search,
339
- percentage,
340
- average,
341
- square_root,
342
- power,
343
- modulus,
344
- divide,
345
  multiply,
346
- subtract,
347
  add,
 
 
 
 
 
 
348
  ]
349
 
 
350
  def build_graph(provider: str = "groq"):
351
  """Build the graph"""
 
352
  if provider == "google":
 
353
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
354
  elif provider == "groq":
355
- llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
 
356
  elif provider == "huggingface":
 
357
  llm = ChatHuggingFace(
358
  llm=HuggingFaceEndpoint(
359
  url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
@@ -362,13 +171,16 @@ def build_graph(provider: str = "groq"):
362
  )
363
  else:
364
  raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
365
-
366
  llm_with_tools = llm.bind_tools(tools)
367
 
 
368
  def assistant(state: MessagesState):
 
369
  return {"messages": [llm_with_tools.invoke(state["messages"])]}
370
 
371
  def retriever(state: MessagesState):
 
372
  similar_question = vector_store.similarity_search(state["messages"][0].content)
373
  example_msg = HumanMessage(
374
  content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
@@ -387,10 +199,16 @@ def build_graph(provider: str = "groq"):
387
  )
388
  builder.add_edge("tools", "assistant")
389
 
 
390
  return builder.compile()
391
 
 
392
  if __name__ == "__main__":
393
- app = build_graph(provider="huggingface")
394
- inputs = {"messages": [HumanMessage(content="I was reading this book last year, and it had this really cool description where it referred to something as looking like a manta ray. I\u2019m trying to figure out when I read that part, but I can\u2019t find what book it\u2019s from. This file I attached has a list of the books I read last year, with the date I started and finished reading each one. I\u2019d like for you to tell me the month in which I likely read the passage about the manta ray.")]}
395
- result = app.invoke(inputs)
396
- print(result)
1
+ """LangGraph Agent"""
2
  import os
 
 
 
3
  from dotenv import load_dotenv
4
  from langgraph.graph import START, StateGraph, MessagesState
5
  from langgraph.prebuilt import tools_condition
6
  from langgraph.prebuilt import ToolNode
7
+ from langchain_google_genai import ChatGoogleGenerativeAI
8
+ from langchain_groq import ChatGroq
9
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
10
+ from langchain_community.tools.tavily_search import TavilySearchResults
11
+ from langchain_community.document_loaders import WikipediaLoader
12
+ from langchain_community.document_loaders import ArxivLoader
13
  from langchain_community.vectorstores import SupabaseVectorStore
14
+ from langchain_core.messages import SystemMessage, HumanMessage
15
+ from langchain_core.tools import tool
16
  from langchain.tools.retriever import create_retriever_tool
17
+ from supabase.client import Client, create_client
18
 
19
+ load_dotenv()
20
 
21
  @tool
22
+ def multiply(a: int, b: int) -> int:
23
+ """Multiply two numbers.
24
+
25
  Args:
26
+ a: first int
27
+ b: second int
 
 
 
28
  """
29
+ return a * b
30
 
31
  @tool
32
+ def add(a: int, b: int) -> int:
33
+ """Add two numbers.
34
 
35
  Args:
36
+ a: first int
37
+ b: second int
 
 
 
38
  """
39
+ return a + b
40
 
41
  @tool
42
+ def subtract(a: int, b: int) -> int:
43
+ """Subtract two numbers.
44
 
45
  Args:
46
+ a: first int
47
+ b: second int
 
 
 
48
  """
49
+ return a - b
50
 
51
  @tool
52
+ def divide(a: int, b: int) -> int:
53
+ """Divide two numbers.
54
 
55
  Args:
56
+ a: first int
57
+ b: second int
 
 
 
 
 
 
58
  """
59
  if b == 0:
60
+ raise ValueError("Cannot divide by zero.")
61
+ return a / b
62
 
63
  @tool
64
  def modulus(a: int, b: int) -> int:
65
+ """Get the modulus of two numbers.
66
 
67
  Args:
68
+ a: first int
69
+ b: second int
 
 
 
 
 
 
70
  """
 
 
71
  return a % b
72
 
73
  @tool
74
+ def wiki_search(query: str) -> str:
75
+ """Search Wikipedia for a query and return maximum 2 results.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
  Args:
78
+ query: The search query."""
79
+ search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
80
+ formatted_search_docs = "\n\n---\n\n".join(
81
+ [
82
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
83
+ for doc in search_docs
84
+ ])
85
+ return {"wiki_results": formatted_search_docs}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
  @tool
88
+ def web_search(query: str) -> str:
89
+ """Search Tavily for a query and return maximum 3 results.
90
 
91
  Args:
92
+ query: The search query."""
93
+ search_docs = TavilySearchResults(max_results=3).invoke(query)
94
+ formatted_search_docs = "\n\n---\n\n".join(
95
+ [
96
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
97
+ for doc in search_docs
98
+ ])
99
+ return {"web_results": formatted_search_docs}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
  @tool
102
  def arvix_search(query: str) -> str:
103
+ """Search Arxiv for a query and return maximum 3 result.
104
 
105
  Args:
106
+ query: The search query."""
 
 
 
 
107
  search_docs = ArxivLoader(query=query, load_max_docs=3).load()
108
  formatted_search_docs = "\n\n---\n\n".join(
109
  [
 
112
  ])
113
  return {"arvix_results": formatted_search_docs}
114
 
115
+
116
+
117
+ # load the system prompt from the file
118
+ with open("system_prompt.txt", "r", encoding="utf-8") as f:
119
  system_prompt = f.read()
120
 
121
+ # System message
122
  sys_msg = SystemMessage(content=system_prompt)
123
 
124
+ # build a retriever
125
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") # dim=768
126
  supabase: Client = create_client(
127
  os.environ.get("SUPABASE_URL"),
128
  os.environ.get("SUPABASE_SERVICE_KEY"))
129
  vector_store = SupabaseVectorStore(
130
  client=supabase,
131
+ embedding= embeddings,
132
  table_name="documents",
133
+ query_name="match_documents_langchain",
134
  )
135
  create_retriever_tool = create_retriever_tool(
136
  retriever=vector_store.as_retriever(),
 
138
  description="A tool to retrieve similar questions from a vector store.",
139
  )
140
 
141
+
142
+
143
  tools = [
144
  multiply,
 
145
  add,
146
+ subtract,
147
+ divide,
148
+ modulus,
149
+ wiki_search,
150
+ web_search,
151
+ arvix_search,
152
  ]
153
 
154
+ # Build graph function
155
  def build_graph(provider: str = "groq"):
156
  """Build the graph"""
157
+ # Load environment variables from .env file
158
  if provider == "google":
159
+ # Google Gemini
160
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
161
  elif provider == "groq":
162
+ # Groq https://console.groq.com/docs/models
163
+ llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
164
  elif provider == "huggingface":
165
+ # TODO: Add huggingface endpoint
166
  llm = ChatHuggingFace(
167
  llm=HuggingFaceEndpoint(
168
  url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
 
171
  )
172
  else:
173
  raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
174
+ # Bind tools to LLM
175
  llm_with_tools = llm.bind_tools(tools)
176
 
177
+ # Node
178
  def assistant(state: MessagesState):
179
+ """Assistant node"""
180
  return {"messages": [llm_with_tools.invoke(state["messages"])]}
181
 
182
  def retriever(state: MessagesState):
183
+ """Retriever node"""
184
  similar_question = vector_store.similarity_search(state["messages"][0].content)
185
  example_msg = HumanMessage(
186
  content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
 
199
  )
200
  builder.add_edge("tools", "assistant")
201
 
202
+ # Compile graph
203
  return builder.compile()
204
 
205
+ # test
206
  if __name__ == "__main__":
207
+ question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
208
+ # Build the graph
209
+ graph = build_graph(provider="groq")
210
+ # Run the graph
211
+ messages = [HumanMessage(content=question)]
212
+ messages = graph.invoke({"messages": messages})
213
+ for m in messages["messages"]:
214
+ m.pretty_print()
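For reference, a minimal sketch of how the rewritten agent module is meant to be driven from other code. It mirrors the __main__ block above; the question text and provider choice are only examples, and the same SUPABASE_* variables and provider API keys are assumed to be set:

    # sketch: call the new agent.py from another script
    from langchain_core.messages import HumanMessage
    from agent import build_graph

    graph = build_graph(provider="groq")  # or "google" / "huggingface"
    result = graph.invoke({"messages": [HumanMessage(content="What is 12 * 7?")]})
    # the last message ends with the "FINAL ANSWER: ..." line required by system_prompt.txt
    print(result["messages"][-1].content)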
app.py CHANGED
@@ -1,18 +1,24 @@
 
1
  import os
 
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
6
  from langchain_core.messages import HumanMessage
7
  from agent import build_graph
8
 
 
 
9
  # (Keep Constants as is)
10
  # --- Constants ---
11
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
 
13
  # --- Basic Agent Definition ---
14
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
15
  class BasicAgent:
 
16
  def __init__(self):
17
  print("BasicAgent initialized.")
18
  self.graph = build_graph()
 
1
+ """ Basic Agent Evaluation Runner"""
2
  import os
3
+ import inspect
4
  import gradio as gr
5
  import requests
 
6
  import pandas as pd
7
  from langchain_core.messages import HumanMessage
8
  from agent import build_graph
9
 
10
+
11
+
12
  # (Keep Constants as is)
13
  # --- Constants ---
14
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
 
16
  # --- Basic Agent Definition ---
17
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
18
+
19
+
20
  class BasicAgent:
21
+ """A langgraph agent."""
22
  def __init__(self):
23
  print("BasicAgent initialized.")
24
  self.graph = build_graph()
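The rendered hunk stops at __init__, so the rest of BasicAgent is not visible here. A hedged sketch of the __call__ method such a wrapper typically needs; the answer-extraction step is an assumption, not something shown in this diff:

    # hypothetical completion of BasicAgent (only __init__ appears in the diff above):
    # run the graph on one question and keep the text after the "FINAL ANSWER:" marker.
    from langchain_core.messages import HumanMessage
    from agent import build_graph

    class BasicAgent:
        """A langgraph agent."""
        def __init__(self):
            print("BasicAgent initialized.")
            self.graph = build_graph()

        def __call__(self, question: str) -> str:
            result = self.graph.invoke({"messages": [HumanMessage(content=question)]})
            answer = result["messages"][-1].content
            return answer.split("FINAL ANSWER:")[-1].strip()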
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt CHANGED
@@ -1,14 +1,18 @@
1
  gradio
2
  requests
3
- dotenv
4
  langchain
5
  langchain-community
6
- wikipedia
7
- langchain_experimental
8
- duckduckgo-search
9
- langchain_google_genai
10
  langchain-groq
 
 
11
  langgraph
 
12
  supabase
13
- langchain-huggingface
14
- itsdangerous
1
  gradio
2
  requests
 
3
  langchain
4
  langchain-community
5
+ langchain-core
6
+ langchain-google-genai
7
+ langchain-huggingface
 
8
  langchain-groq
9
+ langchain-tavily
10
+ langchain-chroma
11
  langgraph
12
+ huggingface_hub
13
  supabase
14
+ arxiv
15
+ pymupdf
16
+ wikipedia
17
+ pgvector
18
+ python-dotenv
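The updated pins assume Supabase credentials and provider API keys are present at runtime. A small pre-flight sketch; SUPABASE_URL and SUPABASE_SERVICE_KEY are read explicitly in agent.py, while GROQ_API_KEY and TAVILY_API_KEY are assumptions based on the default provider and the Tavily search tool:

    # sketch: verify the .env before launching app.py
    import os
    from dotenv import load_dotenv

    load_dotenv()
    required = ["SUPABASE_URL", "SUPABASE_SERVICE_KEY", "GROQ_API_KEY", "TAVILY_API_KEY"]
    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
    print("Environment looks complete.")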
supabase_docs.csv ADDED
The diff for this file is too large to render. See raw diff
 
system_prompt.txt CHANGED
@@ -1,10 +1,5 @@
1
- You are a helpful assistant designed to answer questions using a set of tools. For each question, follow these steps:
2
- 1. Briefly explain your reasoning or thought process.
3
- 2. Conclude your response with the following template:
4
- FINAL ANSWER: [YOUR FINAL ANSWER]
5
- Formatting rules for YOUR FINAL ANSWER:
6
- If the answer is a number, write only the number (no commas, units, or symbols unless specifically requested).
7
- If the answer is a string, do not use articles or abbreviations, and write all digits in plain text unless otherwise specified.
8
- If the answer is a comma-separated list, apply the above rules to each element.
9
- Do not include any extra text before "FINAL ANSWER:" or after your answer.
10
- Always ensure your response starts with your reasoning and ends with the "FINAL ANSWER:" line as described.
 
1
+ You are a helpful assistant tasked with answering questions using a set of tools.
2
+ Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
3
+ FINAL ANSWER: [YOUR FINAL ANSWER].
4
+ YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings. If you are asked for a number, don't use commas or units such as $ or percent signs unless specified otherwise. If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise. If you are asked for a comma-separated list, apply the above rules to each element depending on whether it is a number or a string.
5
+ Your answer should only start with "FINAL ANSWER: ", followed by the answer.
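Because the prompt pins the reply to a single "FINAL ANSWER:" line, a caller can extract or validate it mechanically. A minimal sketch; the regex and helper name are assumptions, not part of this commit:

    # sketch: pull the answer mandated by system_prompt.txt out of a model reply
    import re

    def extract_final_answer(text: str):
        match = re.search(r"FINAL ANSWER:\s*(.+)", text)
        return match.group(1).strip() if match else None

    assert extract_final_answer("FINAL ANSWER: Indonesia, Myanmar") == "Indonesia, Myanmar"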
 
 
 
 
 
test.ipynb ADDED
@@ -0,0 +1,684 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "d0cc4adf",
6
+ "metadata": {},
7
+ "source": [
8
+ "### Question data"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 2,
14
+ "id": "14e3f417",
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "# Load metadata.jsonl\n",
19
+ "import json\n",
20
+ "# Load the metadata.jsonl file\n",
21
+ "with open('metadata.jsonl', 'r') as jsonl_file:\n",
22
+ " json_list = list(jsonl_file)\n",
23
+ "\n",
24
+ "json_QA = []\n",
25
+ "for json_str in json_list:\n",
26
+ " json_data = json.loads(json_str)\n",
27
+ " json_QA.append(json_data)"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": 3,
33
+ "id": "5e2da6fc",
34
+ "metadata": {},
35
+ "outputs": [
36
+ {
37
+ "name": "stdout",
38
+ "output_type": "stream",
39
+ "text": [
40
+ "==================================================\n",
41
+ "Task ID: ed58682d-bc52-4baa-9eb0-4eb81e1edacc\n",
42
+ "Question: What is the last word before the second chorus of the King of Pop's fifth single from his sixth studio album?\n",
43
+ "Level: 2\n",
44
+ "Final Answer: stare\n",
45
+ "Annotator Metadata: \n",
46
+ " ├── Steps: \n",
47
+ " │ ├── 1. Google searched \"King of Pop\".\n",
48
+ " │ ├── 2. Clicked on Michael Jackson's Wikipedia.\n",
49
+ " │ ├── 3. Scrolled down to \"Discography\".\n",
50
+ " │ ├── 4. Clicked on the sixth album, \"Thriller\".\n",
51
+ " │ ├── 5. Looked under \"Singles from Thriller\".\n",
52
+ " │ ├── 6. Clicked on the fifth single, \"Human Nature\".\n",
53
+ " │ ├── 7. Google searched \"Human Nature Michael Jackson Lyrics\".\n",
54
+ " │ ├── 8. Looked at the opening result with full lyrics sourced by Musixmatch.\n",
55
+ " │ ├── 9. Looked for repeating lyrics to determine the chorus.\n",
56
+ " │ ├── 10. Determined the chorus begins with \"If they say\" and ends with \"Does he do me that way?\"\n",
57
+ " │ ├── 11. Found the second instance of the chorus within the lyrics.\n",
58
+ " │ ├── 12. Noted the last word before the second chorus - \"stare\".\n",
59
+ " ├── Number of steps: 12\n",
60
+ " ├── How long did this take?: 20 minutes\n",
61
+ " ├── Tools:\n",
62
+ " │ ├── Web Browser\n",
63
+ " └── Number of tools: 1\n",
64
+ "==================================================\n"
65
+ ]
66
+ }
67
+ ],
68
+ "source": [
69
+ "# randomly select 3 samples\n",
70
+ "# {\"task_id\": \"c61d22de-5f6c-4958-a7f6-5e9707bd3466\", \"Question\": \"A paper about AI regulation that was originally submitted to arXiv.org in June 2022 shows a figure with three axes, where each axis has a label word at both ends. Which of these words is used to describe a type of society in a Physics and Society article submitted to arXiv.org on August 11, 2016?\", \"Level\": 2, \"Final answer\": \"egalitarian\", \"file_name\": \"\", \"Annotator Metadata\": {\"Steps\": \"1. Go to arxiv.org and navigate to the Advanced Search page.\\n2. Enter \\\"AI regulation\\\" in the search box and select \\\"All fields\\\" from the dropdown.\\n3. Enter 2022-06-01 and 2022-07-01 into the date inputs, select \\\"Submission date (original)\\\", and submit the search.\\n4. Go through the search results to find the article that has a figure with three axes and labels on each end of the axes, titled \\\"Fairness in Agreement With European Values: An Interdisciplinary Perspective on AI Regulation\\\".\\n5. Note the six words used as labels: deontological, egalitarian, localized, standardized, utilitarian, and consequential.\\n6. Go back to arxiv.org\\n7. Find \\\"Physics and Society\\\" and go to the page for the \\\"Physics and Society\\\" category.\\n8. Note that the tag for this category is \\\"physics.soc-ph\\\".\\n9. Go to the Advanced Search page.\\n10. Enter \\\"physics.soc-ph\\\" in the search box and select \\\"All fields\\\" from the dropdown.\\n11. Enter 2016-08-11 and 2016-08-12 into the date inputs, select \\\"Submission date (original)\\\", and submit the search.\\n12. Search for instances of the six words in the results to find the paper titled \\\"Phase transition from egalitarian to hierarchical societies driven by competition between cognitive and social constraints\\\", indicating that \\\"egalitarian\\\" is the correct answer.\", \"Number of steps\": \"12\", \"How long did this take?\": \"8 minutes\", \"Tools\": \"1. Web browser\\n2. Image recognition tools (to identify and parse a figure with three axes)\", \"Number of tools\": \"2\"}}\n",
71
+ "\n",
72
+ "import random\n",
73
+ "# random.seed(42)\n",
74
+ "random_samples = random.sample(json_QA, 1)\n",
75
+ "for sample in random_samples:\n",
76
+ " print(\"=\" * 50)\n",
77
+ " print(f\"Task ID: {sample['task_id']}\")\n",
78
+ " print(f\"Question: {sample['Question']}\")\n",
79
+ " print(f\"Level: {sample['Level']}\")\n",
80
+ " print(f\"Final Answer: {sample['Final answer']}\")\n",
81
+ " print(f\"Annotator Metadata: \")\n",
82
+ " print(f\" ├── Steps: \")\n",
83
+ " for step in sample['Annotator Metadata']['Steps'].split('\\n'):\n",
84
+ " print(f\" │ ├── {step}\")\n",
85
+ " print(f\" ├── Number of steps: {sample['Annotator Metadata']['Number of steps']}\")\n",
86
+ " print(f\" ├── How long did this take?: {sample['Annotator Metadata']['How long did this take?']}\")\n",
87
+ " print(f\" ├── Tools:\")\n",
88
+ " for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n",
89
+ " print(f\" │ ├── {tool}\")\n",
90
+ " print(f\" └── Number of tools: {sample['Annotator Metadata']['Number of tools']}\")\n",
91
+ "print(\"=\" * 50)"
92
+ ]
93
+ },
94
+ {
95
+ "cell_type": "code",
96
+ "execution_count": 56,
97
+ "id": "4bb02420",
98
+ "metadata": {},
99
+ "outputs": [],
100
+ "source": [
101
+ "### build a vector database based on the metadata.jsonl\n",
102
+ "# https://python.langchain.com/docs/integrations/vectorstores/supabase/\n",
103
+ "import os\n",
104
+ "from dotenv import load_dotenv\n",
105
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
106
+ "from langchain_community.vectorstores import SupabaseVectorStore\n",
107
+ "from supabase.client import Client, create_client\n",
108
+ "\n",
109
+ "\n",
110
+ "load_dotenv()\n",
111
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\") # dim=768\n",
112
+ "\n",
113
+ "supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
114
+ "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
115
+ "supabase: Client = create_client(supabase_url, supabase_key)"
116
+ ]
117
+ },
118
+ {
119
+ "cell_type": "code",
120
+ "execution_count": null,
121
+ "id": "a070b955",
122
+ "metadata": {},
123
+ "outputs": [],
124
+ "source": [
125
+ "# wrap the metadata.jsonl's questions and answers into a list of document\n",
126
+ "from langchain.schema import Document\n",
127
+ "docs = []\n",
128
+ "for sample in json_QA:\n",
129
+ " content = f\"Question : {sample['Question']}\\n\\nFinal answer : {sample['Final answer']}\"\n",
130
+ " doc = {\n",
131
+ " \"content\" : content,\n",
132
+ " \"metadata\" : { # meatadata的格式必须时source键,否则会报错\n",
133
+ " \"source\" : sample['task_id']\n",
134
+ " },\n",
135
+ " \"embedding\" : embeddings.embed_query(content),\n",
136
+ " }\n",
137
+ " docs.append(doc)\n",
138
+ "\n",
139
+ "# upload the documents to the vector database\n",
140
+ "try:\n",
141
+ " response = (\n",
142
+ " supabase.table(\"documents\")\n",
143
+ " .insert(docs)\n",
144
+ " .execute()\n",
145
+ " )\n",
146
+ "except Exception as exception:\n",
147
+ " print(\"Error inserting data into Supabase:\", exception)\n",
148
+ "\n",
149
+ "# ALTERNATIVE : Save the documents (a list of dict) into a csv file, and manually upload it to Supabase\n",
150
+ "# import pandas as pd\n",
151
+ "# df = pd.DataFrame(docs)\n",
152
+ "# df.to_csv('supabase_docs.csv', index=False)"
153
+ ]
154
+ },
155
+ {
156
+ "cell_type": "code",
157
+ "execution_count": 54,
158
+ "id": "77fb9dbb",
159
+ "metadata": {},
160
+ "outputs": [],
161
+ "source": [
162
+ "# add items to vector database\n",
163
+ "vector_store = SupabaseVectorStore(\n",
164
+ " client=supabase,\n",
165
+ " embedding= embeddings,\n",
166
+ " table_name=\"documents\",\n",
167
+ " query_name=\"match_documents_langchain\",\n",
168
+ ")\n",
169
+ "retriever = vector_store.as_retriever()"
170
+ ]
171
+ },
172
+ {
173
+ "cell_type": "code",
174
+ "execution_count": 55,
175
+ "id": "12a05971",
176
+ "metadata": {},
177
+ "outputs": [
178
+ {
179
+ "name": "stderr",
180
+ "output_type": "stream",
181
+ "text": [
182
+ "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
183
+ "To disable this warning, you can either:\n",
184
+ "\t- Avoid using `tokenizers` before the fork if possible\n",
185
+ "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
186
+ ]
187
+ },
188
+ {
189
+ "data": {
190
+ "text/plain": [
191
+ "Document(metadata={'source': '840bfca7-4f7b-481a-8794-c560c340185d'}, page_content='Question : On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?\\n\\nFinal answer : 80GSFC21M0002')"
192
+ ]
193
+ },
194
+ "execution_count": 55,
195
+ "metadata": {},
196
+ "output_type": "execute_result"
197
+ }
198
+ ],
199
+ "source": [
200
+ "query = \"On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?\"\n",
201
+ "# matched_docs = vector_store.similarity_search(query, 2)\n",
202
+ "docs = retriever.invoke(query)\n",
203
+ "docs[0]"
204
+ ]
205
+ },
206
+ {
207
+ "cell_type": "code",
208
+ "execution_count": 31,
209
+ "id": "1eae5ba4",
210
+ "metadata": {},
211
+ "outputs": [
212
+ {
213
+ "name": "stdout",
214
+ "output_type": "stream",
215
+ "text": [
216
+ "List of tools used in all samples:\n",
217
+ "Total number of tools used: 83\n",
218
+ " ├── web browser: 107\n",
219
+ " ├── image recognition tools (to identify and parse a figure with three axes): 1\n",
220
+ " ├── search engine: 101\n",
221
+ " ├── calculator: 34\n",
222
+ " ├── unlambda compiler (optional): 1\n",
223
+ " ├── a web browser.: 2\n",
224
+ " ├── a search engine.: 2\n",
225
+ " ├── a calculator.: 1\n",
226
+ " ├── microsoft excel: 5\n",
227
+ " ├── google search: 1\n",
228
+ " ├── ne: 9\n",
229
+ " ├── pdf access: 7\n",
230
+ " ├── file handling: 2\n",
231
+ " ├── python: 3\n",
232
+ " ├── image recognition tools: 12\n",
233
+ " ├── jsonld file access: 1\n",
234
+ " ├── video parsing: 1\n",
235
+ " ├── python compiler: 1\n",
236
+ " ├── video recognition tools: 3\n",
237
+ " ├── pdf viewer: 7\n",
238
+ " ├── microsoft excel / google sheets: 3\n",
239
+ " ├── word document access: 1\n",
240
+ " ├── tool to extract text from images: 1\n",
241
+ " ├── a word reversal tool / script: 1\n",
242
+ " ├── counter: 1\n",
243
+ " ├── excel: 3\n",
244
+ " ├── image recognition: 5\n",
245
+ " ├── color recognition: 3\n",
246
+ " ├── excel file access: 3\n",
247
+ " ├── xml file access: 1\n",
248
+ " ├── access to the internet archive, web.archive.org: 1\n",
249
+ " ├── text processing/diff tool: 1\n",
250
+ " ├── gif parsing tools: 1\n",
251
+ " ├── a web browser: 7\n",
252
+ " ├── a search engine: 7\n",
253
+ " ├── a speech-to-text tool: 2\n",
254
+ " ├── code/data analysis tools: 1\n",
255
+ " ├── audio capability: 2\n",
256
+ " ├── pdf reader: 1\n",
257
+ " ├── markdown: 1\n",
258
+ " ├── a calculator: 5\n",
259
+ " ├── access to wikipedia: 3\n",
260
+ " ├── image recognition/ocr: 3\n",
261
+ " ├── google translate access: 1\n",
262
+ " ├── ocr: 4\n",
263
+ " ├── bass note data: 1\n",
264
+ " ├── text editor: 1\n",
265
+ " ├── xlsx file access: 1\n",
266
+ " ├── powerpoint viewer: 1\n",
267
+ " ├── csv file access: 1\n",
268
+ " ├── calculator (or use excel): 1\n",
269
+ " ├── computer algebra system: 1\n",
270
+ " ├── video processing software: 1\n",
271
+ " ├── audio processing software: 1\n",
272
+ " ├── computer vision: 1\n",
273
+ " ├── google maps: 1\n",
274
+ " ├── access to excel files: 1\n",
275
+ " ├── calculator (or ability to count): 1\n",
276
+ " ├── a file interface: 3\n",
277
+ " ├── a python ide: 1\n",
278
+ " ├── spreadsheet editor: 1\n",
279
+ " ├── tools required: 1\n",
280
+ " ├── b browser: 1\n",
281
+ " ├── image recognition and processing tools: 1\n",
282
+ " ├── computer vision or ocr: 1\n",
283
+ " ├── c++ compiler: 1\n",
284
+ " ├── access to google maps: 1\n",
285
+ " ├── youtube player: 1\n",
286
+ " ├── natural language processor: 1\n",
287
+ " ├── graph interaction tools: 1\n",
288
+ " ├── bablyonian cuniform -> arabic legend: 1\n",
289
+ " ├── access to youtube: 1\n",
290
+ " ├── image search tools: 1\n",
291
+ " ├── calculator or counting function: 1\n",
292
+ " ├── a speech-to-text audio processing tool: 1\n",
293
+ " ├── access to academic journal websites: 1\n",
294
+ " ├── pdf reader/extracter: 1\n",
295
+ " ├── rubik's cube model: 1\n",
296
+ " ├── wikipedia: 1\n",
297
+ " ├── video capability: 1\n",
298
+ " ├── image processing tools: 1\n",
299
+ " ├── age recognition software: 1\n",
300
+ " ├── youtube: 1\n"
301
+ ]
302
+ }
303
+ ],
304
+ "source": [
305
+ "# list of the tools used in all the samples\n",
306
+ "from collections import Counter, OrderedDict\n",
307
+ "\n",
308
+ "tools = []\n",
309
+ "for sample in json_QA:\n",
310
+ " for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n",
311
+ " tool = tool[2:].strip().lower()\n",
312
+ " if tool.startswith(\"(\"):\n",
313
+ " tool = tool[11:].strip()\n",
314
+ " tools.append(tool)\n",
315
+ "tools_counter = OrderedDict(Counter(tools))\n",
316
+ "print(\"List of tools used in all samples:\")\n",
317
+ "print(\"Total number of tools used:\", len(tools_counter))\n",
318
+ "for tool, count in tools_counter.items():\n",
319
+ " print(f\" ├── {tool}: {count}\")"
320
+ ]
321
+ },
322
+ {
323
+ "cell_type": "markdown",
324
+ "id": "5efee12a",
325
+ "metadata": {},
326
+ "source": [
327
+ "#### Graph"
328
+ ]
329
+ },
330
+ {
331
+ "cell_type": "code",
332
+ "execution_count": 55,
333
+ "id": "7fe573cc",
334
+ "metadata": {},
335
+ "outputs": [],
336
+ "source": [
337
+ "system_prompt = \"\"\"\n",
338
+ "You are a helpful assistant tasked with answering questions using a set of tools.\n",
339
+ "If the tool is not available, you can try to find the information online. You can also use your own knowledge to answer the question. \n",
340
+ "You need to provide a step-by-step explanation of how you arrived at the answer.\n",
341
+ "==========================\n",
342
+ "Here is a few examples showing you how to answer the question step by step.\n",
343
+ "\"\"\"\n",
344
+ "for i, samples in enumerate(random_samples):\n",
345
+ " system_prompt += f\"\\nQuestion {i+1}: {samples['Question']}\\nSteps:\\n{samples['Annotator Metadata']['Steps']}\\nTools:\\n{samples['Annotator Metadata']['Tools']}\\nFinal Answer: {samples['Final answer']}\\n\"\n",
346
+ "system_prompt += \"\\n==========================\\n\"\n",
347
+ "system_prompt += \"Now, please answer the following question step by step.\\n\"\n",
348
+ "\n",
349
+ "# save the system_prompt to a file\n",
350
+ "with open('system_prompt.txt', 'w') as f:\n",
351
+ " f.write(system_prompt)"
352
+ ]
353
+ },
354
+ {
355
+ "cell_type": "code",
356
+ "execution_count": 56,
357
+ "id": "d6beb0da",
358
+ "metadata": {},
359
+ "outputs": [
360
+ {
361
+ "name": "stdout",
362
+ "output_type": "stream",
363
+ "text": [
364
+ "\n",
365
+ "You are a helpful assistant tasked with answering questions using a set of tools.\n",
366
+ "If the tool is not available, you can try to find the information online. You can also use your own knowledge to answer the question. \n",
367
+ "You need to provide a step-by-step explanation of how you arrived at the answer.\n",
368
+ "==========================\n",
369
+ "Here is a few examples showing you how to answer the question step by step.\n",
370
+ "\n",
371
+ "Question 1: In terms of geographical distance between capital cities, which 2 countries are the furthest from each other within the ASEAN bloc according to wikipedia? Answer using a comma separated list, ordering the countries by alphabetical order.\n",
372
+ "Steps:\n",
373
+ "1. Search the web for \"ASEAN bloc\".\n",
374
+ "2. Click the Wikipedia result for the ASEAN Free Trade Area.\n",
375
+ "3. Scroll down to find the list of member states.\n",
376
+ "4. Click into the Wikipedia pages for each member state, and note its capital.\n",
377
+ "5. Search the web for the distance between the first two capitals. The results give travel distance, not geographic distance, which might affect the answer.\n",
378
+ "6. Thinking it might be faster to judge the distance by looking at a map, search the web for \"ASEAN bloc\" and click into the images tab.\n",
379
+ "7. View a map of the member countries. Since they're clustered together in an arrangement that's not very linear, it's difficult to judge distances by eye.\n",
380
+ "8. Return to the Wikipedia page for each country. Click the GPS coordinates for each capital to get the coordinates in decimal notation.\n",
381
+ "9. Place all these coordinates into a spreadsheet.\n",
382
+ "10. Write formulas to calculate the distance between each capital.\n",
383
+ "11. Write formula to get the largest distance value in the spreadsheet.\n",
384
+ "12. Note which two capitals that value corresponds to: Jakarta and Naypyidaw.\n",
385
+ "13. Return to the Wikipedia pages to see which countries those respective capitals belong to: Indonesia, Myanmar.\n",
386
+ "Tools:\n",
387
+ "1. Search engine\n",
388
+ "2. Web browser\n",
389
+ "3. Microsoft Excel / Google Sheets\n",
390
+ "Final Answer: Indonesia, Myanmar\n",
391
+ "\n",
392
+ "Question 2: Review the chess position provided in the image. It is black's turn. Provide the correct next move for black which guarantees a win. Please provide your response in algebraic notation.\n",
393
+ "Steps:\n",
394
+ "Step 1: Evaluate the position of the pieces in the chess position\n",
395
+ "Step 2: Report the best move available for black: \"Rd5\"\n",
396
+ "Tools:\n",
397
+ "1. Image recognition tools\n",
398
+ "Final Answer: Rd5\n",
399
+ "\n",
400
+ "==========================\n",
401
+ "Now, please answer the following question step by step.\n",
402
+ "\n"
403
+ ]
404
+ }
405
+ ],
406
+ "source": [
407
+ "# load the system prompt from the file\n",
408
+ "with open('system_prompt.txt', 'r') as f:\n",
409
+ " system_prompt = f.read()\n",
410
+ "print(system_prompt)"
411
+ ]
412
+ },
413
+ {
414
+ "cell_type": "code",
415
+ "execution_count": null,
416
+ "id": "42fde0f8",
417
+ "metadata": {},
418
+ "outputs": [],
419
+ "source": [
420
+ "import dotenv\n",
421
+ "from langgraph.graph import MessagesState, START, StateGraph\n",
422
+ "from langgraph.prebuilt import tools_condition\n",
423
+ "from langgraph.prebuilt import ToolNode\n",
424
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
425
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
426
+ "from langchain_community.tools.tavily_search import TavilySearchResults\n",
427
+ "from langchain_community.document_loaders import WikipediaLoader\n",
428
+ "from langchain_community.document_loaders import ArxivLoader\n",
429
+ "from langchain_community.vectorstores import SupabaseVectorStore\n",
430
+ "from langchain.tools.retriever import create_retriever_tool\n",
431
+ "from langchain_core.messages import HumanMessage, SystemMessage\n",
432
+ "from langchain_core.tools import tool\n",
433
+ "from supabase.client import Client, create_client\n",
434
+ "\n",
435
+ "# Define the retriever from supabase\n",
436
+ "load_dotenv()\n",
437
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\") # dim=768\n",
438
+ "\n",
439
+ "supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
440
+ "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
441
+ "supabase: Client = create_client(supabase_url, supabase_key)\n",
442
+ "vector_store = SupabaseVectorStore(\n",
443
+ " client=supabase,\n",
444
+ " embedding= embeddings,\n",
445
+ " table_name=\"documents\",\n",
446
+ " query_name=\"match_documents_langchain\",\n",
447
+ ")\n",
448
+ "\n",
449
+ "question_retrieve_tool = create_retriever_tool(\n",
450
+ " vector_store.as_retriever(),\n",
451
+ " \"Question Retriever\",\n",
452
+ " \"Find similar questions in the vector database for the given question.\",\n",
453
+ ")\n",
454
+ "\n",
455
+ "@tool\n",
456
+ "def multiply(a: int, b: int) -> int:\n",
457
+ " \"\"\"Multiply two numbers.\n",
458
+ "\n",
459
+ " Args:\n",
460
+ " a: first int\n",
461
+ " b: second int\n",
462
+ " \"\"\"\n",
463
+ " return a * b\n",
464
+ "\n",
465
+ "@tool\n",
466
+ "def add(a: int, b: int) -> int:\n",
467
+ " \"\"\"Add two numbers.\n",
468
+ " \n",
469
+ " Args:\n",
470
+ " a: first int\n",
471
+ " b: second int\n",
472
+ " \"\"\"\n",
473
+ " return a + b\n",
474
+ "\n",
475
+ "@tool\n",
476
+ "def subtract(a: int, b: int) -> int:\n",
477
+ " \"\"\"Subtract two numbers.\n",
478
+ " \n",
479
+ " Args:\n",
480
+ " a: first int\n",
481
+ " b: second int\n",
482
+ " \"\"\"\n",
483
+ " return a - b\n",
484
+ "\n",
485
+ "@tool\n",
486
+ "def divide(a: int, b: int) -> int:\n",
487
+ " \"\"\"Divide two numbers.\n",
488
+ " \n",
489
+ " Args:\n",
490
+ " a: first int\n",
491
+ " b: second int\n",
492
+ " \"\"\"\n",
493
+ " if b == 0:\n",
494
+ " raise ValueError(\"Cannot divide by zero.\")\n",
495
+ " return a / b\n",
496
+ "\n",
497
+ "@tool\n",
498
+ "def modulus(a: int, b: int) -> int:\n",
499
+ " \"\"\"Get the modulus of two numbers.\n",
500
+ " \n",
501
+ " Args:\n",
502
+ " a: first int\n",
503
+ " b: second int\n",
504
+ " \"\"\"\n",
505
+ " return a % b\n",
506
+ "\n",
507
+ "@tool\n",
508
+ "def wiki_search(query: str) -> str:\n",
509
+ " \"\"\"Search Wikipedia for a query and return maximum 2 results.\n",
510
+ " \n",
511
+ " Args:\n",
512
+ " query: The search query.\"\"\"\n",
513
+ " search_docs = WikipediaLoader(query=query, load_max_docs=2).load()\n",
514
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
515
+ " [\n",
516
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content}\\n</Document>'\n",
517
+ " for doc in search_docs\n",
518
+ " ])\n",
519
+ " return {\"wiki_results\": formatted_search_docs}\n",
520
+ "\n",
521
+ "@tool\n",
522
+ "def web_search(query: str) -> str:\n",
523
+ " \"\"\"Search Tavily for a query and return maximum 3 results.\n",
524
+ " \n",
525
+ " Args:\n",
526
+ " query: The search query.\"\"\"\n",
527
+ " search_docs = TavilySearchResults(max_results=3).invoke(query=query)\n",
528
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
529
+ " [\n",
530
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content}\\n</Document>'\n",
531
+ " for doc in search_docs\n",
532
+ " ])\n",
533
+ " return {\"web_results\": formatted_search_docs}\n",
534
+ "\n",
535
+ "@tool\n",
536
+ "def arvix_search(query: str) -> str:\n",
537
+ " \"\"\"Search Arxiv for a query and return maximum 3 result.\n",
538
+ " \n",
539
+ " Args:\n",
540
+ " query: The search query.\"\"\"\n",
541
+ " search_docs = ArxivLoader(query=query, load_max_docs=3).load()\n",
542
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
543
+ " [\n",
544
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content[:1000]}\\n</Document>'\n",
545
+ " for doc in search_docs\n",
546
+ " ])\n",
547
+ " return {\"arvix_results\": formatted_search_docs}\n",
548
+ "\n",
549
+ "@tool\n",
550
+ "def similar_question_search(question: str) -> str:\n",
551
+ " \"\"\"Search the vector database for similar questions and return the first results.\n",
552
+ " \n",
553
+ " Args:\n",
554
+ " question: the question human provided.\"\"\"\n",
555
+ " matched_docs = vector_store.similarity_search(query, 3)\n",
556
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
557
+ " [\n",
558
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content[:1000]}\\n</Document>'\n",
559
+ " for doc in matched_docs\n",
560
+ " ])\n",
561
+ " return {\"similar_questions\": formatted_search_docs}\n",
562
+ "\n",
563
+ "tools = [\n",
564
+ " multiply,\n",
565
+ " add,\n",
566
+ " subtract,\n",
567
+ " divide,\n",
568
+ " modulus,\n",
569
+ " wiki_search,\n",
570
+ " web_search,\n",
571
+ " arvix_search,\n",
572
+ " question_retrieve_tool\n",
573
+ "]\n",
574
+ "\n",
575
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
576
+ "llm_with_tools = llm.bind_tools(tools)"
577
+ ]
578
+ },
579
+ {
580
+ "cell_type": "code",
581
+ "execution_count": null,
582
+ "id": "7dd0716c",
583
+ "metadata": {},
584
+ "outputs": [],
585
+ "source": [
586
+ "# load the system prompt from the file\n",
587
+ "with open('system_prompt.txt', 'r') as f:\n",
588
+ " system_prompt = f.read()\n",
589
+ "\n",
590
+ "\n",
591
+ "# System message\n",
592
+ "sys_msg = SystemMessage(content=system_prompt)\n",
593
+ "\n",
594
+ "# Node\n",
595
+ "def assistant(state: MessagesState):\n",
596
+ " \"\"\"Assistant node\"\"\"\n",
597
+ " return {\"messages\": [llm_with_tools.invoke([sys_msg] + state[\"messages\"])]}\n",
598
+ "\n",
599
+ "# Build graph\n",
600
+ "builder = StateGraph(MessagesState)\n",
601
+ "builder.add_node(\"assistant\", assistant)\n",
602
+ "builder.add_node(\"tools\", ToolNode(tools))\n",
603
+ "builder.add_edge(START, \"assistant\")\n",
604
+ "builder.add_conditional_edges(\n",
605
+ " \"assistant\",\n",
606
+ " # If the latest message (result) from assistant is a tool call -> tools_condition routes to tools\n",
607
+ " # If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END\n",
608
+ " tools_condition,\n",
609
+ ")\n",
610
+ "builder.add_edge(\"tools\", \"assistant\")\n",
611
+ "\n",
612
+ "# Compile graph\n",
613
+ "graph = builder.compile()\n"
614
+ ]
615
+ },
616
+ {
617
+ "cell_type": "code",
618
+ "execution_count": 49,
619
+ "id": "f4e77216",
620
+ "metadata": {},
621
+ "outputs": [
622
+ {
623
+ "data": {
624
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAANgAAAD5CAIAAADKsmwpAAAQAElEQVR4nOydB1wUR9vA5zrcwdGOXqRIFRC7gkZsxK7YguU1xhgTJcVXjVETNSYajCbGYCxYYuJnjYliYq+xRo2xIIqAgNI7HFzh+vfo5UVEQEzYuzl2/r/7HXu7e7dX/jwz88zsLFun0yECwdiwEYGAAUREAhYQEQlYQEQkYAERkYAFREQCFpikiAq5pixfKavWyKrVarVOrTSBDBTPnMnmMviWbL6Q5ehuhgjPYkoiSqtU6TekmcmSqjKVpS2Hb8mC31Voy0GmkArValDRQ4WsWsrhMbPvy7yCBd4hcLNAhCcwTCKhrdXoLv9WVpqvsHPhegdbuLY1R6ZMjUyTlSzNTZflZ9aED7Xz7WCJaI8JiHj3ivj3fSXhw+w6RNqg1gWE9suHyhQyTdR/nMwtWIjG4C7i7/uKzfjM7kNEqPVSWqBIXJc38HUnN18+oitYi3hyR5GTl1lIhBWiAQfW5fWKFolceIiW4Cti4vq8tmEWweG0sFDPgXW5IRHW8KkR/WAiLLmQWOIZJKCVhUB0rNuVo2UVRUpEP3AUMfVGNZvDDIu0RvRj4nyPs/uKaTg2D0cRz+0r6diXjhYCDAYDigLIVSGagZ2If52qCI4Q8szpm8vo2Nfm3tWqGqkG0Qm8RIQiKTtVFj60NSdrmsMro+xvnatEdAIvETPvSKFPFtEeD39+8mUxohN4/erQ8QWdsMiwfPTRR7/99ht6efr375+fn48oAHpZrEXcgodyRBvwErGyROUdYmgRU1JS0MtTWFhYWUlh6enX2SInTYZoA0YiQvW8olhJXTMlMTFx3LhxERER/fr1+/DDD4uKimBl586dIaotXbo0MjISHmo0mo0bN44cOTI8PHzQoEErVqyQy/8OSxD/du3a9f777/fo0ePChQtDhw6FlcOHD58zZw6iAIGQXZpLo4QiRiJKq9Tw7SNquHnz5rJly8aPH793795vv/0Wgtn8+fNh/ZEjR+AevDx48CAsgGo//PDDzJkz9+zZs2TJknPnzq1bt07/Cmw2e//+/W3btk1ISOjSpUtcXBys3LFjx2effYYoAL4K+EIQbcBoPKK0SiMQUhUOMzIyeDzesGHDwCc3NzcIdQUFBbDeyupx5w2fz9cvQBSEgAe2wbKHh0dUVNSlS5f0rwAZPjMzM4iI+ocCweMqhFAo1C+0OAIrllRMowwORiLqtDouZU1mKILBpGnTpo0YMaJbt24uLi52dnbP72ZtbX348GGIncXFxWq1WiaTgaO1W0NDQ5GhYLEZXDMaJRAw+qh8IVtcokLU4OnpuW3bNoiFa9euhYrdlClTkpOTn99t1apVW7Zsgark5s2boZiOjo6uu9XCwnDDESSVanAR0QaMRIRyGUpnRBm+vr4Q6k6ePAmVPBaLNWvWLKXymdYAtFSgpvj6668PHjzY1dVVJBJJJBJkJCitqGAIThHRkm3rxNFqKenvh/iXlJQEC6Bgp06dZsyYAe2VsrK/u3T1gwy0Wi24qK8sAlKp9Pz5802PP6BudIJCprF3p9HYRLxqIWZ8FnSuIAq4fPny7NmzT58+nZubm5qaCo1iZ2dnJycn3hNu3LgBK6ES6e/vf+jQIdgnPT0dQibkeqqqqh4+fAj1xXovCM0UuL948WJmZiaigNS/qp09TfvUnJcCLxE92wke3qVExKlTp0KFb82aNWPGjImNjYVIFh8fD+bBJqgvnjp1ClI2kDJcvHgxBEWoIy5YsCAmJgb2BFknT54MbZd6LxgYGAi5xm+++WblypWopdGodXkP5B4BNDpzAK8R2nKJ+sSOohHvuCJ6k3VXkpMmfyXaHtEGvCKiuQXbxpF7m2YDT57n8q9ldBudjt0J9hHDRAnzM9r3bnhgLJSb0EHX4CZoAnO53AY3eXl5Qe4GUcMPT2hwE6R7Gmt3Q8m+YcOGBjfdv17l4G5m69jwZ2mt4Hjy1K1zlQyGrv0rDZ/FXF1d3eB6hUIBIuqrffVgMpkU9X/oj1svDVSLSqXicDgNboLGe91UeV0ObcnvPcbe0rrhJ7ZWMD2LD36Mdt2tDD8kzOjQ9oNj2ok0dJrL+f0lZYUKRCfO7C128jSjoYUI5/Oaoet579c5r4yyd/GhRTrt7E/Fbr7mtJ0HB99udQaTEfOhxx9HylKuVaFWjVajO7Auz9aJS+fZmExgEqbLh0qzU2Thw0StMsH754ny1OvVkWPt6TzxDTKVaelK8hSXfysVCNlQTEMVylxg8qMBinNqslNl109UhEVadx1oy2TSaKBNg5iGiHpy02UQPLKSpfbuPCsRB7yEG1/I0moR/rAYSFyukoo1OqS7/2c1vPO27QWhr1hzuOSsxceYkoi1FGTJS/OU0io13JgMhkzSkoPHZDLZo0ePIOGMWhRLGw581QIrlqUtx83HXGBFZi9/BpMUkVJSUlKWL1++Y8cORDAg5P+SgAVERAIWEBEJWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWEBEJWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARKwPg8Gwt6fR5NWYQESsj06nKykpQQTDQkQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWEBEJWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFpAL/vzN+PHjJRIJg8FQKpVisVgkEsGyQqE4fvw4IlAPuRDc3wwaNKi4uDg/P7+0tFSlUhUUFMCypSV9r1trYIiIfxMTE+Pu7l53DUTE3r17I4JBICL+DZfLHTlyJIv19AK8Hh4eY8aMQQSDQER8yrhx41xdXfXLEA779Onj7OyMCAaBiPgUCIqjR4/WB0UIh2PHjkUEQ0FEfAYIii4uLvpw6OjoiAiGAsc8olyiKStQKBXGySuNGDD9999/79lxdGayFBkcBtIJrNm2jlw2h14xAq88orJGe2pXUV6G3N1foJRrEf3g8hgVxSqtVuvfybLzAFtEGzASUS7V7F+b132YvYObOaI9fx4rMeMzw4fZIXqAUfzfvTK730QXYqGeLgPta+TaP0+UI3qAi4i3z1cGdLUSCEnf91O6vGr/8K5MLlUjGoCLiEWPavhCDiLUg4EqClWIBuAiokqpE9oSEetj52xWXU6LiIhLUVgj0eg0iFAPpUKjpcfwKFInI2ABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWkHNWUGbmgz79Ot+5cwsRjAcREYnsHWZ9MN/Fxa2JfbKyMmImDEX/jpGj+hcU5iNCQ5CiGQkthSOGv+BE+rS0FPTvKCoqFIsrEaERTFjE+6n3tmz5Lv1BqlKp8Gzj/eabsZ07ddNvOnwk8edfdhUU5PF4Zu1DO74bO9fBwbGx9VA0v/lWTPyaLSEhYaDLxoQ1t27/JZNJnZxcxoyeMGzoqB9+TPhx+2Z4OpTgsTNnw8rGDn3
w15+3/bAxbvma+O9W5eQ8FFpaTZr05uBBI27euj57zjuww4SJwyf/Z9obU95BhGcx1aJZoVB8NP89Dpf71ar1G9ZtD2oXumjxnJKSYtiUlHTzq6+XjR41fuuWvXFffCuuqlz6+fwm1tdl5aqlpWUlXyxf8/3Wn0ZFx6z5dsWf16/EvPb6qFExoGzi/lPDho5u4tBsNlsqlWzfsWXpkpW/Hfw9KmrIN2viYFNIcNjiRXGwQ8LGHeNjpiDCc5hqRGSxWN98nWBnJ7KysoaHU6fM2L9/T/Ld230iB2Q9zODxeANfHQZauLq4LVm0orCoAPZpbH1dMrMeRI98LTCgHSy7Dh/j5xvg6OhsZmbG4/IYDIb+WGq1urFD67dOiJmiD8CDBo6AUJqRkda9e08+XwBrLC2F8GqI8BymKiLIpFKr4teufJCRJpFU60+KraoSw32HsM4gzfuzpkGZ2KlTN2cnF1tbuybW1yW8xyu79/wAL9itW0RoSIfAwOCXOrQeb29f/QJoB/fVkmpEeBGmWjTn5mbPmfuOUqlcuODzTRt3JmzYUbvJw8Pzu/ht0AretHkt1MlmvjvlXkpyE+vr8t9ZC6ZNjU1KujH3w5nRo/vDnhDhmn9oPRB3n3lMpkJtBqYaEc+cPaHRaD75eLn+V4dGRt2tPj6+nyxcBjtAdnDrtvULP571054jXC63wfV1nwjRbvTo8XArLy87cfLw1u/XW1vbjBs7qfmHJvwzTDUiqlRKaPnWxp6Tp576lJKSfPduEnpSjwwL6zT1jRmQNwGxGltf+0SJRHLy1FF9CIRSO+a1yUFBIdCmbv6hXwiZKLoxTFXEwIBg0OjosV/LykoTD+67n3oXQlfG40qb5Oq1yx8vmn3u/Om8/FzIsEBLwsnR2dHRqbH1ta8JNcj4tV9Cyxq25hfknTp9DNKHoCxssrCwhANBu7uwsKCJQzfxhoVP6otXrlyEV0CE5zDVojk8/JXXxv0nYVP8+g2ru3WNmD9v6c+/7Ny950cmkwnZQbVatXHjGkjECAQWwcHtV8TFg2STJk5tcH3tawoEgi9XfAcJwtlz3oYqIOQRIeEHrWzY1K/vwOMnDs35cMaE8VNgZWOH9vUNaOwN+/kFdu0avmHjN0VFBTPemYUIz4LLJEy/fJsb1kfk0IakNp7h0sGiNgHmgV2FqLVDuvgIWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWEBEJWICLiFYiro5BBo3Wh8dncXm0mAQBlw/JEzBL82oQ4VlyUqW2zlxEA3AR0TOQLy5WIkIdJGKV0JZj40BENCDu/nwLa9bVoyWI8D/O7i7oFS1C9ACv6zVfOVpeWaxy8jIXuZrR7crZehgMXVW5uqpMeeVwyaQFbaxEdLksHF4iAll3pek3JTUyTXlBoyW1UqlkPQFRgFajUapUBpuPQS6Xc7nc2s9iJmBxuAxnH7NuA+1YLAaiDdiJ+EKys7MPHDjwwQcfIGpYunTp+fPnly9f3r17d0Q9EokkLi4ODofojSmJKBaLCwsLnZycrKysEDXcu3fvk08+AdfDw8Pj4+ORAdm7d29oaGhgYCCiJSZTDystLY2Ojvby8qLOQmD37t1gIXo8IWLapUuXkAEZMmQIxMXKSprOoWgaIkJFCvw4c+YMVKcQZaSkpNy4cUO/DN7v2rULGRALC4sdOx5Po/Pw4cPc3FxEM0xAxDlz5kD9oWPHjohidu7cWVRUVPsQimkDB0XA2tra2dk5NjYWjo7oBO4i7tmzZ9iwYXw+H1EM/PC14VAPVEn1IcrA8Hi8gwcPQiEAy/QpqfEV8eLFi3APFkZGRiLq2b59O4RDrVar+x+w8v79+8hIdOr0eM4dCI3nzp1DNADTVjN8+8ePH//iiy+QwYGaIjQajBILGwT+QyZPnqxWq9ns1jxUCtOIyGQyjWIhhoCFcL969Wr41d9QSQAAD6ZJREFUz0StF7xELC8vnz59Oiz06tULEeowb948KCVqalrtACW8oj38369atQoRGgKKCCig9Q35iIgI1LrAJSIePnwY7pctW0ZpvtrUgWpijx49oA8mOTkZtS6wEHHhwoUCgQARmgHUnqHvEdKNsHzrVuu5fqCRRayoqID78ePHGyZH02pwc3t85cANGzYcPXoUtQqMKeKxY8cSExNhISQkBBFenoSEBOgYhIX8fJO/1qQxRbxw4cIbb7yBCP8CfXph9+7d27ZtQ6aMcUQ8ffo03JNBeC2FvjseFmQyGTJNDC2iSqXq1q1bWFgYIrQoU6dORU/6RXfu3IlMEIOKCJ25ZWVlkAmzs7NDBAqIioqCLxl6KU1u4L3hRIyLi6uqqnJycmrdfaZGZ/bs2e7u7pCOOHjwIDIdDOQEJGB9n4AI1KNvSt++fRvi4siRI5EpQLmIUExwuVwvL6/g4GBEMCCLFy/OzMyEhWvXrnXt2hXhDbVFM3wR0DT28fEhHSdGwdvbG+6vX7/+9ddfI7yhUETooTfWIOd/yfPXaDZpZs6cCZkK9OTUVYQrVIm4b9++v/76q0OHDsjUuHPnzvDhw1HromfPnuhJTwy2p2VRJSI0jaEHD5ka+oEtEyZMQK0R+B/Td+5jCFWnCkDiGlKGkKxBpsP3339fWlo6b9481EqBTycUCik9JfcfY3pTjlBEfHw8i8WKjY1FBGNAYWMFMqtGPAvupYBku5WVVau3cO7cudj+IhSK6OzsbBIjNxctWgSZ9tdffx21dqBohioTwhIKi2b1Eww2v9s/A8J2//79Bw8ejGgAqSNiyttvvw0N5N69eyOCsaG2ZyUyMlKpxHRm7IkTJ06fPp1WFtK0jgj4+flBXzPCj+joaKga6qf1oA80rSNiS1RU1JYtWzw8PBDNoG8dERorWq0Wn08O7wfK4l9//ZWMzMUNaovm7OxsqIohPBCLxREREadPn6athfStI3p7eysUChxmbCkoKIB64dWrVzFPJ1EKqSMamQcPHsyaNevQoUOI3tA6j1hVVcVkMvWD140C9O5AD97evXsRAWMoP3nq0qVLK1asQEYCjr527VpioR761hGB0NDQM2fODB06FJqrBpiQvS4nT54EBbdu3YoIT6BjHRE6LZKSkuqNube1tYXoaBgdExMTr1y5YsRgjCE41xGpioibNm1ycXGptxJarBAgEfXs3Lnzzp07xMJ6iEQiPC1ElBbN7777ro2NTe1DCL3t2rUzwNn1CQkJRUVF0IOHCM9C0zpi3759hwwZwuH8faFXUFB/LhmlrF69msFgzJ49GxGeg9Z5xBkzZly7dg3kgP6M9evX+/j4IMr4/PPPIYWOT18ObtCxjlhLfHy8h4cH9DhbW1tTauH8+fNDQkKIhU2Acx2xWTU2tUorl2jRP4Tx8UfLlixZ0ql9z+oKqk5cX7J4yaDh/QYMGIAIjQN1xGnTpgUEBCD8eEHRnHKtKumCuLxQaW5ByeXiWwT4CFyBtiJf5xUs6NjX2tnLHBHqAPkyqBrBtwT3+jWw7Ofnt2fPHoQNTUXEayfKS/NVvU
Y5WdpyEPbAlysuUf3+S1H4ELs2gZRfRNKE8Pf3T01NhY7W2jXQ4/rWW28hnGi0jnj1WLm4RN0r2tEkLATg393agTv0LXd4549STHUGXyqIiYkxN3+mlGjTpk2/fv0QTjQsYkWxsjRP0X2oAzJB+k10vnkW04k1jMKIESNcXV1rH/L5fAzn0G9YRLAQahTINOHyWJUlqqpyTBNmRgGSCbXtZchw9enTB2FGwyJKxBp7dxMeQOruL6goJiI+BYKi/hpBAoFgypQpCD8aFlGl0Kpq/nG+xvhIKlU6DZnT5xkgKEIvF4RDPC/yReZVx5FH96WQc5VVaZRybY1cg1oCAeoe2e496O4/tbsItQQCIVur0cG9QMhy8jKztPlXjVoiIkakXq9Kuyl9dE/q4idUqXQsNovFYSNmi2UtuvYYAvfVLZRRkNYw1EqVNlup0+qq9peaC1htwwTtwoUWVv/kDRMRsSD9ZvWFxDIbFwGLJ2g3wL4282wqOPgiebUiJ0t271q+VxC/50g7Nufleo+JiEZGo9Ed3loorUZu7Z255ib8c5hb8uAm8rIpzxFvWpAVOdY+qJuw+U8nIhqT4pyafWtyfbq5CN15qLVg624Ftzt/lJTkKXqPsm/ms3C5gj0NEZcpj2wrbtcf6vmtx8JaHP3ty0qZUN9o5v5ERONQ+KgmcX2hZxdX1HqxdbcuLkRHfyxszs5ERCOgVmn3r81r07k1W6jHro21TMq8furFPa5ERCNw+Psin+6t30I9dl52j1IVOenSpncjIhqau3+IpVIGT2AaY5paBL5IeO6XF1QWiYiG5tJv5Q7etohOmAt5TDYbcqVN7IORiEs+nTdn7gzUqkm+LLZrY8nmYTrc/Xby6bmLukmllailsfOyvXulqSsBtpiIBxJ/WrHyU0RokvvXJTwBHefF4/E55YXKiqJGJ1RvMRHT0nCcKxsrVAptSU6NhR1NT6kRiPiZdxoNii3TszJr9vTbt2/AwvHjhzYl7PRt63/nzq3NW78DO6HbNDAg+K233gsMaKff+fCRxJ/27cjPzzU353frGj7jnf/a2tafwhX2+fmXXQUFeTyeWfvQju/GznVwcEQmzsMUqcjLElHGzaQT5y7tKirJ4vH4HUKiBvWfweU+jr7b9yyEvmt/3x5nz28XV5c4iNpED53bxj0EPe5gVB888s2NpGM6rTbIv2db786IMizt+YXZjVYTWyYiLvtstZ9vQN8+UYn7T3l7tc3JeTR33kx7kcO6tT98F7/NnM+f++GM4uLHo49OnDj81dfLogYM+X7L3s8+XZWWfn/Bwg/qnUmYlHQT9hk9avzWLXvjvvhWXFW59PP5yPQRl6g1KqpGMyTfO7dz3yK/tl3nxO54LXpR0t0zP/8ap9/EYrGzHt3Ozrk7a+b2Tz86xudb7d2/TL/pzPkfr15PHD5o1n9nbvfyDDt17ntEGRweuyBT3tjWlhHRwsKCxWZzuFwrK2sWi3Xw158h2i2Y/5mPjy/cPl6wTK1WHz/xeMLWfT/vjIjoPXHCG+7ubcLCOr337ofgYnLy7bqvlvUwg8fjDXx1mKuLW1Bg8JJFK2JnzkGmj6RSTV0z5cyF7d6eHQcPmCmycw/0Cx8SFXvj9rFK8d9DD5VKOdjG45pDjOwYOrC49KFS+Xg+6b9uHw0O6t214zB4VnjX0X4+FM4JwzFj10gbHVtJSas5LT0FAmTtfEt8Ph+0y8hIAx0zMtODAkNq9/T3D4L7BxlpdZ/eIawzFOjvz5p26PCBgsJ8KLhBR2T6yCQaikTUarW5+SkQDmvXgJRwX1D4QP8QPNMX0wDf/PGgGJm8Sq1WlZbluLsG1T7Lw60dohKegCWtavgUDkpG38hkUjtbUd01fL4AVspr5FAKw/LT9eaPT0CWy58Zq+nh4QkF+u69P27avLZ69fLAwGCoI7YCF6mbZUilqtFqNSfObD559plZSauqS/ULbPbz4yp0ECbhD6fOJqhcIirRaXSNDbWkRESBwEIqfaZ9BA9BTXMzcyaTCUY+Xf9kGfav9wpQoH+ycJlGo4FGz9Zt6xd+POunPUewnbelmVhYsUpKWmbcfz04HDOoCPbs/lq3TsOfOaKgqcw550mMlCue/lJyeVM5538JxCBljZZv2bByLVk017Y5/P2CUtNSamdAq5ZUZ2c/DAh4PDliWx+/O8lPr517724S+l8BXUtKSvLdJ+uhugn1yKlvzBCLK8vLmzugCFssrNlqJSUiwr+3q3NARWWBg72n/mZr48pksvn8poamcthcG2vngsL02jVpGdcQZagVGjNBozWTFhPR0sLywYPU9AepIM2IEWMVipqVX30GzefMzAfLln8MMe/VqKGw29ixk65cuQjpm8LCgpu3rq9d91X79h0DnhXx6rXLHy+afe786bz8XHjB/fv3ODk6Ozo6IRPH2p7DZlF1bmRkz0l37p2FVnBxyaO8/NRdPy9Zt2V6Tc0LhhpAlgea21euJ0Jt8tylnfkFaYgylHK1s3ejOdQWK5qjo2PiVix+/4M3l366qmuXHqu+XLdpy9pp08dDVAsJDvvm6wRr68ezx/bvNxAcBRE3b/kO7OwZEfn22x/Ue6lJE6dCPXrjxjWlZSWwT3Bw+xVx8SZ3GsfzeLYTHPuxUOQtQhQQ2q7P+NFLz17Yfvz0JjMzC0+P0BlT15uZCZp+1oC+06SyykPH4rU6baBfxJCod7fvXQDLiAKkpVLf0EaHADc8G9i14+XQum8faap982d257fvZQU/PMKMA+vy2UJLSxEd54jKuJwzZparlV3Dw47I6BuDEtDVQiFRIPpRI1GK3HiNWYjIyVMGJrCL8I9DD4WOFlzzhn+S5JTze/YvbXCTwNxKKhc3uKl7p5FDB76HWoisR7e27mi4BwGSREwGEzVUTerRZRRk0VEjlGaW9xxmjRqHiGhoeo20+/N0hUu7hmda8/PpOnvm/zW4CfpCapPS9eDxWrIS4uYS2Nh7UKkULBan7lSLzXkP0ooaDkfnGdTUmyQiGhrfDpbpt6Q11YoGT94D1Wy5LsiocDg8W5uWfA81FdV9xr6giUbqiEZg8BtOmdfytVpaTBNVlFbi38Hc4UWTyxERjcP4eR6ZV3JRa6covczemRkcbvXCPYmIxsHGgTvhI9f0i9katQlP/9c0JRllPkGcvuOaNe8wEdFo8C04r81xAxelFXLUutCqtXnJhZ5+7M79bZr5FCKiMRHact750oejlebeLpBXtZL8YklWRer57J5DrLtEvUSHCGk1G5+oSY45abLzB0p5Fjwmlyu0F2B7ml8TSMrkklJZVbGk/SvWY2e+9CXGiIhY4O7Hn/iRx6N70rRb0sxreTbO5soaLZvLZnHZDCamnexMFlMlV2pUGqTTVhTIoV0c1EkQ1N3zZWdG1ENExIg2QYI2T7K+Rdk1T6YuVtfItAoZJSPH/j3mFjoGky0Q8vhCtrOXE4f7r6p5REQccfQwc/RAtKJhEblmDC0y4WFXAmsOk2Xyw8ZoRcPh1NKGU/LIhHMK2SkSWyfTPq+AbjQsooM7z3THo
colapErz8Ka1DpMiUYjomtbs/O/NGuuT9w4tSO/y4Dm5lEJmNDU9Zrv/iFOvyVp39vOxpHLYuOe+q6RaapKlZcOFg+c7OjgQceJjkyaF1w4POuu9Na5ysKsGhYb66LaSsSpKld5Bgk6D7CBblxEMDVeIGItCjnWffM6LTITkO5KE6a5IhIIlEKalgQsICISsICISMACIiIBC4iIBCwgIhKw4P8BAAD//2v4e7oAAAAGSURBVAMA1x7mMDWkAPIAAAAASUVORK5CYII=",
+ "text/plain": [
+ "<IPython.core.display.Image object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from IPython.display import Image, display\n",
+ "\n",
+ "display(Image(graph.get_graph(xray=True).draw_mermaid_png()))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5987d58c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "question = \"\"\n",
+ "messages = [HumanMessage(content=question)]\n",
+ "messages = graph.invoke({\"messages\": messages})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "330cbf17",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for m in messages['messages']:\n",
+ " m.pretty_print()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "aiagent",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }