keynes42 committed on
Commit
0a6e6cf
·
verified ·
1 Parent(s): 7a6f2df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -5,9 +5,10 @@ import requests
5
  import inspect
6
  import subprocess
7
  import pandas as pd
8
- import torch, spaces
9
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
10
  from smolagents import CodeAgent, HfApiModel
 
11
  from huggingface_hub import InferenceClient, hf_hub_download
12
 
13
  subprocess.run(["playwright", "install"], check=True)
@@ -61,7 +62,7 @@ def check_token_access():
61
  # --- Basic Agent Definition ---
62
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
63
  class BasicAgent:
64
- def __init__(self, model_id="meta-llama/Llama-3.1-8B-Instruct", hf_token=None):
65
  print("BasicAgent initialized.")
66
  print("ENV-HF_TOKEN-LEN", len(hf_token), file=sys.stderr)
67
  check_token_access()
@@ -97,7 +98,7 @@ class BasicAgent:
97
  model=mod,
98
  tokenizer=tok,
99
  max_new_tokens=512,
100
- temperature=1.0,
101
  )
102
  self.agent = CodeAgent(model=self, tools=[], add_base_tools=True)
103
 
@@ -136,10 +137,11 @@ class BasicAgent:
136
  print(f"Agent returning its generated answer: {response}")
137
 
138
  # wrap back into a chat message dict
139
- return {
140
- "role": 'assistant',
141
- "content": [{"type": "text", "text": response}],
142
- }
 
143
 
144
  __call__ = generate
145
 
 
5
  import inspect
6
  import subprocess
7
  import pandas as pd
8
+ import torch
9
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
10
  from smolagents import CodeAgent, HfApiModel
11
+ from smolagents.models import ChatMessage
12
  from huggingface_hub import InferenceClient, hf_hub_download
13
 
14
  subprocess.run(["playwright", "install"], check=True)
 
62
  # --- Basic Agent Definition ---
63
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
64
  class BasicAgent:
65
+ def __init__(self, model_id="meta-llama/Llama-3.1-8B-Instruct", hf_token=""):
66
  print("BasicAgent initialized.")
67
  print("ENV-HF_TOKEN-LEN", len(hf_token), file=sys.stderr)
68
  check_token_access()
 
98
  model=mod,
99
  tokenizer=tok,
100
  max_new_tokens=512,
101
+ # temperature=1.0,
102
  )
103
  self.agent = CodeAgent(model=self, tools=[], add_base_tools=True)
104
 
 
137
  print(f"Agent returning its generated answer: {response}")
138
 
139
  # wrap back into a chat message dict
140
+ return ChatMessage(role="assistant", content=response)
141
+ # return {
142
+ # "role": 'assistant',
143
+ # "content": [{"type": "text", "text": response}],
144
+ # }
145
 
146
  __call__ = generate
147