Abdulvajid committed on
Commit
a87c761
·
verified ·
1 Parent(s): 65bafe8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +44 -0
README.md CHANGED
@@ -32,6 +32,50 @@ model = AutoPeftModelForCausalLM.from_pretrained(peft_model_id,
32
  load_in_4bit=True)
33
 
34
  tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  ```
36
 
37
  ## Training procedure
 
32
  load_in_4bit=True)
33
 
34
  tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
35
+
36
+ tools = [
37
+ {
38
+ "type": "function",
39
+ "function": {
40
+ "name": "search_restaurants",
41
+ "description": "Search for restaurants in a specific location",
42
+ "parameters": {
43
+ "type": "object",
44
+ "properties": {
45
+ "location": {
46
+ "type": "string",
47
+ "description": "The location to search for restaurants"
48
+ },
49
+ "cuisine": {
50
+ "type": "string",
51
+ "description": "The cuisine type to filter the restaurants"
52
+ },
53
+ "price_range": {
54
+ "type": "integer",
55
+ "description": "The price range of the restaurants (1 = cheap to 4 = very expensive)"
56
+ }
57
+ },
58
+ "required": ["location"]
59
+ }
60
+ }
61
+ }
62
+ ]
63
+
64
+ messages=[
65
+ {"role": "user", "content": "I'm in Malappuram, can you find a restaurant for me?"}
66
+ ]
67
+
68
+ prompt = tokenizer.apply_chat_template(
69
+ messages,
70
+ tools=tools,
71
+ add_generation_prompt=True,
72
+ tokenize=True,
73
+ return_tensors="pt"
74
+ ).to('cuda')
75
+
76
+ output = model.generate(prompt, max_new_tokens=500)
77
+
78
+ print(''.join(tokenizer.batch_decode(output[0][len(prompt[0]):])))
79
  ```
80
 
81
  ## Training procedure