Update app.py
app.py CHANGED
@@ -12,14 +12,41 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 login(token=os.environ["HUGGINGFACEHUB_API_TOKEN"])
 
+## TOOL 1
 search_tool = DuckDuckGoSearchTool()
 
+## TOOL 2
+from smolagents import Tool
+from youtube_transcript_api import YouTubeTranscriptApi
+import re
+
+class YouTubeTranscriptTool(Tool):
+    name = "youtube_transcript"
+    description = "Use this to extract spoken content from a YouTube video given its URL."
+
+    def use(self, input_text: str) -> str:
+        try:
+            # Extract video ID from URL
+            video_id_match = re.search(r"(?:v=|youtu\.be/)([a-zA-Z0-9_-]{11})", input_text)
+            if not video_id_match:
+                return "Invalid YouTube URL provided."
+
+            video_id = video_id_match.group(1)
+            transcript = YouTubeTranscriptApi.get_transcript(video_id)
+
+            # Combine into a single string
+            full_text = " ".join([entry['text'] for entry in transcript])
+            return full_text[:3000]  # limit to 3000 chars
+        except Exception as e:
+            return f"Error fetching transcript: {e}"
+transcript_tool = YouTubeTranscriptTool()
+
 async def run_and_submit_all(profile: gr.OAuthProfile | None):
     log_output = ""
 
     try:
         agent = ToolCallingAgent(
-            tools=[search_tool],
+            tools=[search_tool, transcript_tool],
             model=InferenceClientModel(model="deepseek-ai/DeepSeek-V3", provider="together"),
             max_steps=15,
             verbosity_level=0,