add model txt
app.py CHANGED
@@ -4,7 +4,6 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient(model="meta-llama/Meta-Llama-3-8B-Instruct")
 
 
 def respond(
@@ -14,7 +13,9 @@ def respond(
     max_tokens,
     temperature,
     top_p,
+    model,
 ):
+    client = InferenceClient(model=model)
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -56,7 +57,9 @@ demo = gr.ChatInterface(
             step=0.05,
             label="Top-p (nucleus sampling)",
         ),
+        gr.Textbox(value="meta-llama/Meta-Llama-3-8B-Instruct", label="Model"),
     ],
+    multimodal=True,
 )
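For context, a minimal sketch of what app.py plausibly looks like after this commit. Only the lines shown in the diff above are confirmed; the respond signature, slider defaults, and streaming loop are assumptions taken from the stock Gradio ChatInterface template that Hugging Face Spaces generates. Note that multimodal=True makes ChatInterface pass message as a dict with "text" and "files" keys rather than a plain string, so the sketch reads message["text"].

import gradio as gr
from huggingface_hub import InferenceClient


def respond(message, history, system_message, max_tokens, temperature, top_p, model):
    # Per this commit, the client is built on every call so each request
    # can target whatever model the user typed into the Model textbox.
    client = InferenceClient(model=model)

    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:  # template-style (user, assistant) tuples
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # With multimodal=True, `message` arrives as {"text": ..., "files": [...]};
    # this sketch only forwards the text part.
    messages.append({"role": "user", "content": message["text"]})

    # Streaming loop as in the standard template (assumption, not in the diff).
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        gr.Textbox(value="meta-llama/Meta-Llama-3-8B-Instruct", label="Model"),
    ],
    multimodal=True,
)

if __name__ == "__main__":
    demo.launch()

Moving the InferenceClient construction from module scope into respond trades a tiny per-request setup cost for flexibility: the previously hard-coded meta-llama/Meta-Llama-3-8B-Instruct becomes just the default value of the new Model textbox.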