ayan4m1 commited on
Commit
984fbb4
·
verified ·
1 Parent(s): 3e69d5d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -9
app.py CHANGED
@@ -2,26 +2,33 @@ import gradio as gr
2
  from transformers import pipeline
3
 
4
  models = {
5
- "MagicPrompt": "pszemraj/distilgpt2-magicprompt-SD",
6
  "DistilGPT2 SD": "FredZhang7/distilgpt2-stable-diffusion",
7
- "Llama-SmolTalk-3.2-1B": "prithivMLmods/Llama-SmolTalk-3.2-1B-Instruct"
 
 
 
8
  }
9
- pipelines = {}
10
-
11
- for key, value in models.items():
12
- pipelines[value] = pipeline("text-generation", model=value)
13
 
14
  def respond(
15
  message,
16
  _: list[tuple[str, str]],
 
17
  model: str,
18
  max_new_tokens: int,
19
  temperature: float,
20
  top_p: float,
21
  top_k: int
22
  ):
23
- yield pipelines[model](
24
- message,
 
 
 
 
 
 
 
 
25
  max_new_tokens=max_new_tokens,
26
  do_sample=True,
27
  temperature=temperature,
@@ -38,7 +45,8 @@ demo = gr.ChatInterface(
38
  title="Prompt Enhancer Test",
39
  type="messages",
40
  additional_inputs=[
41
- gr.Radio(list(models.items()), value="pszemraj/distilgpt2-magicprompt-SD", type="value", label="Model"),
 
42
  # gr.Textbox(value="Enhance the provided text so that it is more vibrant and detailed.", label="System prompt"),
43
  gr.Slider(minimum=8, maximum=128, value=64, step=8, label="Max new tokens"),
44
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
 
2
  from transformers import pipeline
3
 
4
  models = {
 
5
  "DistilGPT2 SD": "FredZhang7/distilgpt2-stable-diffusion",
6
+ "Llama-SmolTalk-3.2-1B": "prithivMLmods/Llama-SmolTalk-3.2-1B-Instruct",
7
+ "Dolphin-Phi 3 2.9.2": "cognitivecomputations/dolphin-2.9.2-Phi-3-Medium",
8
+ "EXAONE 3.5 2.4B": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
9
+ "Granite 3.3 2B": "ibm-granite/granite-3.3-2b-instruct"
10
  }
 
 
 
 
11
 
12
  def respond(
13
  message,
14
  _: list[tuple[str, str]],
15
+ system_prompt: str,
16
  model: str,
17
  max_new_tokens: int,
18
  temperature: float,
19
  top_p: float,
20
  top_k: int
21
  ):
22
+ pipe = pipeline("text-generation", model=model, trust_remote_code=True)
23
+
24
+ yield pipe(
25
+ [
+ {
+ "role": "system",
+ "content": system_prompt
+ },
+ {
+ "role": "user",
+ "content": message
+ }
+ ],
32
  max_new_tokens=max_new_tokens,
33
  do_sample=True,
34
  temperature=temperature,
 
45
  title="Prompt Enhancer Test",
46
  type="messages",
47
  additional_inputs=[
48
+ gr.Textbox(value="When the user provides two sentences, return a longer sentence that fuses the two together with a natural motion in between.", lines=5, show_label=True, label="System prompt"),
49
+ gr.Radio(list(models.items()), value="FredZhang7/distilgpt2-stable-diffusion", type="value", label="Model"),
50
  # gr.Textbox(value="Enhance the provided text so that it is more vibrant and detailed.", label="System prompt"),
51
  gr.Slider(minimum=8, maximum=128, value=64, step=8, label="Max new tokens"),
52
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),