arre99 committed on
Commit
7a165bc
Β·
1 Parent(s): 428a745

all commits squished into one

Browse files
app.py CHANGED
@@ -134,7 +134,7 @@ iface_constructor_info = gr.Interface(
134
  )
135
 
136
 
137
- # Create your markdown-only tab using Blocks
138
  with gr.Blocks() as markdown_tab:
139
  gr.Markdown(MARKDOWN_INTRODUCTION)
140
 
 
134
  )
135
 
136
 
137
+ # About introduction tab
138
  with gr.Blocks() as markdown_tab:
139
  gr.Markdown(MARKDOWN_INTRODUCTION)
140
 
assets/image_base64.json ADDED
The diff for this file is too large to render. See raw diff
 
mcp_client.py CHANGED
@@ -3,23 +3,26 @@ import datetime
3
  import gradio as gr
4
  import openf1_tools
5
  from smolagents import InferenceClientModel, LiteLLMModel, ToolCallingAgent, MCPClient
 
 
 
6
 
7
  # Can manully set this to a specific time to make the agent think it is in the past
8
- time = datetime.datetime.now().astimezone().isoformat()
 
9
 
10
- SYSTEM_PROMPT = f"""You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more.
11
- Be concise and accurate in your responses. If you don't know something, use the available tools to find the information.
12
  In addition, you will be asked to act as a live race engineer strategist during a Formula 1 race, making crucial calls during the event.
13
  For formula 1 related tasks, start by calling get_api_endpoints() to see all available endpoints and use them to access the OpenF1 API.
14
- Then retrieve information about a specific endpoint to make sure it does what you want it to do.
 
15
  Lastly, combine the endpoint and filters to create a request to the OpenF1 API and call send_request() to send the request.
16
 
17
- Current time (ISO 8601): {time}"""
18
-
19
 
20
  def agent_chat(message: str, history: list):
21
  message = f"{SYSTEM_PROMPT}\n\nTask: {message}"
22
- return agent.run(message)
23
 
24
 
25
  if __name__ == "__main__":
@@ -27,6 +30,7 @@ if __name__ == "__main__":
27
  list_tools = False # Set to True to only list tools (used for debugging)
28
  local_model = False # If you have Ollama installed, set this to True
29
  openf1_tool_only = True
 
30
 
31
  try:
32
 
@@ -55,13 +59,32 @@ if __name__ == "__main__":
55
  num_ctx=32768,
56
  )
57
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  model = InferenceClientModel(
59
- model_id="mistralai/Mistral-Nemo-Instruct-2407",
60
- provider="nebius",
61
- api_key=os.getenv("NEBIUS_API_KEY")
 
62
  )
63
 
64
  agent = ToolCallingAgent(model=model, tools=[*tools])
 
65
 
66
  # Launch chat interface
67
  chat_interface = gr.ChatInterface(
 
3
  import gradio as gr
4
  import openf1_tools
5
  from smolagents import InferenceClientModel, LiteLLMModel, ToolCallingAgent, MCPClient
6
+ from dotenv import load_dotenv
7
+
8
+ load_dotenv()
9
 
10
  # Can manually set this to a specific time to make the agent think it is in the past
11
+ time = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0).isoformat()
12
+ spanish_gp_race_plus1h = "2025-06-01T14:00:00Z"
13
 
14
+ SYSTEM_PROMPT = f"""You are a helpful Formula 1 assistant and strategist. You have access to various F1 data and tools to help answer questions about races, drivers, teams, and more. Be concise and accurate in your responses. You must use the available tools to find the information.
 
15
  In addition, you will be asked to act as a live race engineer strategist during a Formula 1 race, making crucial calls during the event.
16
  For formula 1 related tasks, start by calling get_api_endpoints() to see all available endpoints and use them to access the OpenF1 API.
17
+ Then retrieve information about a specific endpoint, using get_endpoint_info(endpoint), to make sure it does what you want it to do.
18
+ If you are unsure what a filter does, get its description using get_filter_info(filter_name).
19
  Lastly, combine the endpoint and filters to create a request to the OpenF1 API and call send_request() to send the request.
20
 
21
+ Current UTC time (ISO 8601): {spanish_gp_race_plus1h}"""
 
22
 
23
  def agent_chat(message: str, history: list):
24
  message = f"{SYSTEM_PROMPT}\n\nTask: {message}"
25
+ return agent.run(message, max_steps=50)
26
 
27
 
28
  if __name__ == "__main__":
 
30
  list_tools = False # Set to True to only list tools (used for debugging)
31
  local_model = False # If you have Ollama installed, set this to True
32
  openf1_tool_only = True
33
+ provider = "nebius" # "nebius" (mistral) or "sambanova" (deepseek)
34
 
35
  try:
36
 
 
59
  num_ctx=32768,
60
  )
61
  else:
62
+
63
+ # Get model ID
64
+ model_id_env_mapping = {
65
+ "nebius": "deepseek-ai/DeepSeek-R1-0528",
66
+ "sambanova": "deepseek-ai/DeepSeek-R1-0528"
67
+ }
68
+ model_id = model_id_env_mapping[provider]
69
+
70
+ # Get API key from environment variable
71
+ provider_env_mapping = {
72
+ "nebius": "NEBIUS_API_KEY",
73
+ "sambanova": "SAMBANOVA_API_KEY"
74
+ }
75
+ api_key = os.getenv(provider_env_mapping[provider])
76
+
77
+ print(f"Model ID: {model_id} | API key: {api_key}")
78
+
79
  model = InferenceClientModel(
80
+ model_id=model_id,
81
+ provider=provider,
82
+ api_key=api_key,
83
+ temperature=0
84
  )
85
 
86
  agent = ToolCallingAgent(model=model, tools=[*tools])
87
+ # invoked through agent.run("This is the task i want you to do.")
88
 
89
  # Launch chat interface
90
  chat_interface = gr.ChatInterface(
todo.txt CHANGED
@@ -1,7 +1,5 @@
1
- - Record demonstration using Loom and add link to README
2
-
3
  - Video walkthrough
4
- 1. first demo the Gradio UI
5
- 2. then demo the MCP client using mcp_client.py
6
- 3. then demo the MCP client using Claude Desktop
7
- 4. finally add the Loom video embedded in the "About" tab
 
 
 
1
  - Video walkthrough
2
+ 1. High overview of the project + Have a nice excalidraw image of the architecture
3
+ 2. first demo the Gradio UI on HF spaces
4
+ 3. then demo the MCP client using mcp_client.py locally by calling Deepseek through nebius provider
5
+ 4. finally demo the MCP client using Claude Desktop
utils/constants.py CHANGED
@@ -1,6 +1,9 @@
1
  import json
2
  import datetime
3
 
 
 
 
4
  # Variables
5
  CURRENT_YEAR = datetime.datetime.now().year
6
 
@@ -33,7 +36,7 @@ CONSTRUCTORS_PER_SEASON: dict[int, list[str]] = json.load(open("assets/construct
33
  # Load in driver per season
34
  DRIVERS_PER_SEASON: dict[int, list[str]] = json.load(open("assets/drivers_per_season.json"))
35
 
36
- MARKDOWN_INTRODUCTION = """
37
  # 🏁 Formula 1 MCP server 🏎️
38
 
39
  Welcome to the Formula 1 MCP server, your one-stop destination for Formula 1 data retrieval and real-time race strategy analysis.
@@ -45,6 +48,10 @@ This application leverages the FastF1 library and OpenF1 API to provide detailed
45
  Video is work in progress πŸ‘€
46
  <!-- <iframe src="https://www.loom.com/embed/4e0a5dbf7b8e4c428a7e2a8a8a8edc3b" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style="position: absolute; top: 0; left: 0; width: 100%; height: 100%;"></iframe> -->
47
 
 
 
 
 
48
 
49
  ## Available Tools in Gradio UI
50
 
@@ -74,8 +81,7 @@ There are different ways to interact with the MCP server:
74
  2) (Good for demo) One can also use the Gradio interface directly to interact with the MCP server's tools. Note, however, the UI for the OpenF1 tools is purely limited to strings and JSON.
75
 
76
  3) (Advanced) One can establish an MCP client by running `mcp_client.py`. This client is connected to the MCP server hosted on HuggingFace spaces.
77
-
78
-
79
 
80
  ## MCP json configuration file
81
 
@@ -153,4 +159,4 @@ Retrieve data about meetings in 2023 for Singapore <br>
153
  Retrieve data about pit stops with session_key 9158 where the pit duration was less than 31s <br>
154
  ```https://api.openf1.org/v1/pit?session_key=9158&pit_duration<31```
155
 
156
- """
 
1
  import json
2
  import datetime
3
 
4
+ # Architecture image
5
+ IMAGE_BASE64: str = json.load(open("assets/image_base64.json"))['image_base64']
6
+
7
  # Variables
8
  CURRENT_YEAR = datetime.datetime.now().year
9
 
 
36
  # Load in driver per season
37
  DRIVERS_PER_SEASON: dict[int, list[str]] = json.load(open("assets/drivers_per_season.json"))
38
 
39
+ MARKDOWN_INTRODUCTION = f"""
40
  # 🏁 Formula 1 MCP server 🏎️
41
 
42
  Welcome to the Formula 1 MCP server, your one-stop destination for Formula 1 data retrieval and real-time race strategy analysis.
 
48
  Video is work in progress πŸ‘€
49
  <!-- <iframe src="https://www.loom.com/embed/4e0a5dbf7b8e4c428a7e2a8a8a8edc3b" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style="position: absolute; top: 0; left: 0; width: 100%; height: 100%;"></iframe> -->
50
 
51
+ ## Architecture
52
+
53
+ <img src="data:image/png;base64,{IMAGE_BASE64}" width="800" />
54
+
55
 
56
  ## Available Tools in Gradio UI
57
 
 
81
  2) (Good for demo) One can also use the Gradio interface directly to interact with the MCP server's tools. Note, however, the UI for the OpenF1 tools is purely limited to strings and JSON.
82
 
83
  3) (Advanced) One can establish an MCP client by running `mcp_client.py`. This client is connected to the MCP server hosted on HuggingFace spaces.
84
+ """ + """
 
85
 
86
  ## MCP json configuration file
87
 
 
159
  Retrieve data about pit stops with session_key 9158 where the pit duration was less than 31s <br>
160
  ```https://api.openf1.org/v1/pit?session_key=9158&pit_duration<31```
161
 
162
+ """
utils/notebooks/openf1_api_playground.ipynb CHANGED
@@ -16,13 +16,25 @@
16
  },
17
  {
18
  "cell_type": "code",
19
- "execution_count": 2,
20
  "id": "f7e75102",
21
  "metadata": {},
22
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
23
  "source": [
24
- "spain_race_time = \"2025-06-08T15:00:00Z\" # june 1 2025 15:00\n",
25
- "spain_qualy_time = \"2025-06-07T15:00:00Z\" # may 31 2025 16:00\n",
26
  "now = datetime.now()"
27
  ]
28
  },
 
16
  },
17
  {
18
  "cell_type": "code",
19
+ "execution_count": null,
20
  "id": "f7e75102",
21
  "metadata": {},
22
+ "outputs": [
23
+ {
24
+ "ename": "NameError",
25
+ "evalue": "name 'timedelta' is not defined",
26
+ "output_type": "error",
27
+ "traceback": [
28
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
29
+ "\u001b[31mNameError\u001b[39m Traceback (most recent call last)",
30
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 3\u001b[39m\n\u001b[32m 1\u001b[39m spain_race_time = \u001b[33m\"\u001b[39m\u001b[33m2025-06-08T14:00:00+00:00\u001b[39m\u001b[33m\"\u001b[39m \u001b[38;5;66;03m# june 1 2025 15:00\u001b[39;00m\n\u001b[32m 2\u001b[39m spain_qualy_time = \u001b[33m\"\u001b[39m\u001b[33m2025-06-07T14:00:00+00:00\u001b[39m\u001b[33m\"\u001b[39m \u001b[38;5;66;03m# may 31 2025 16:00\u001b[39;00m\n\u001b[32m----> \u001b[39m\u001b[32m3\u001b[39m spain_qualy_time_plus1 = (datetime.strptime(spain_qualy_time, \u001b[33m\"\u001b[39m\u001b[33m%\u001b[39m\u001b[33mY-\u001b[39m\u001b[33m%\u001b[39m\u001b[33mm-\u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[33mT\u001b[39m\u001b[33m%\u001b[39m\u001b[33mH:\u001b[39m\u001b[33m%\u001b[39m\u001b[33mM:\u001b[39m\u001b[33m%\u001b[39m\u001b[33mS\u001b[39m\u001b[33m%\u001b[39m\u001b[33mz\u001b[39m\u001b[33m\"\u001b[39m) + \u001b[43mtimedelta\u001b[49m(hours=\u001b[32m1\u001b[39m)).isoformat()\n\u001b[32m 4\u001b[39m now = datetime.now()\n",
31
+ "\u001b[31mNameError\u001b[39m: name 'timedelta' is not defined"
32
+ ]
33
+ }
34
+ ],
35
  "source": [
36
+ "spain_race_time = \"2025-06-08T14:00:00+00:00\" # june 1 2025 15:00\n",
37
+ "spain_qualy_time = \"2025-06-07T14:00:00+00:00\" # may 31 2025 16:00\n",
38
  "now = datetime.now()"
39
  ]
40
  },