mlmPenguin committed on
Commit
9aacf3f
·
verified ·
1 Parent(s): 3f777d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -11
app.py CHANGED
@@ -4,10 +4,10 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
4
 
5
  ### Model Setup ###
6
  # Names for the two models
7
- LLAMA_MODEL_NAME = "meta-llama/Llama-3.1-8B"
8
  QWEN_MODEL_NAME = "Qwen/Qwen2.5-VL-7B-Instruct"
9
 
10
- # Load Meta-Llama
11
  llama_tokenizer = AutoTokenizer.from_pretrained(LLAMA_MODEL_NAME)
12
  llama_model = AutoModelForCausalLM.from_pretrained(LLAMA_MODEL_NAME, device_map="auto")
13
  llama_pipe = pipeline(
@@ -29,7 +29,6 @@ def generate_response(prompt: str, model_choice: str) -> str:
29
  """
30
  Given a prompt and a model choice, generate a response.
31
  """
32
- # Parameters can be tuned as needed.
33
  kwargs = {
34
  "max_length": 256,
35
  "do_sample": True,
@@ -45,9 +44,8 @@ def generate_response(prompt: str, model_choice: str) -> str:
45
 
46
  def chat_logic(user_input: str, chat_history: list):
47
  """
48
- This function builds the prompt from the conversation history,
49
- chooses which model will respond (based on whether the user interjected
50
- or not) and appends the response to the chat history.
51
  """
52
  if chat_history is None:
53
  chat_history = []
@@ -70,7 +68,7 @@ def chat_logic(user_input: str, chat_history: list):
70
  else:
71
  selected_model = random.choice(["Llama", "Qwen"])
72
 
73
- # Build the prompt from the conversation history.
74
  prompt = ""
75
  for speaker, message in chat_history:
76
  prompt += f"{speaker}: {message}\n"
@@ -85,10 +83,10 @@ def chat_logic(user_input: str, chat_history: list):
85
 
86
  ### Gradio Interface ###
87
  with gr.Blocks() as demo:
88
- gr.Markdown("# Group Chat: Meta-Llama and Qwen Models")
89
  gr.Markdown(
90
- "This is a demo where two models converse with each other. "
91
- "Leave the text box empty to let the models continue chatting automatically, "
92
  "or type a message to interject (a random model will then respond)."
93
  )
94
 
@@ -106,7 +104,7 @@ with gr.Blocks() as demo:
106
  # Maintain the conversation history in Gradio's state.
107
  state = gr.State([])
108
 
109
- # When the send button is clicked, update the chat.
110
  send_btn.click(
111
  fn=chat_logic,
112
  inputs=[user_message, state],
 
4
 
5
  ### Model Setup ###
6
  # Names for the two models
7
+ LLAMA_MODEL_NAME = "meta-llama/Llama-3.2-11B-Vision"
8
  QWEN_MODEL_NAME = "Qwen/Qwen2.5-VL-7B-Instruct"
9
 
10
+ # Load Meta-Llama Vision model
11
  llama_tokenizer = AutoTokenizer.from_pretrained(LLAMA_MODEL_NAME)
12
  llama_model = AutoModelForCausalLM.from_pretrained(LLAMA_MODEL_NAME, device_map="auto")
13
  llama_pipe = pipeline(
 
29
  """
30
  Given a prompt and a model choice, generate a response.
31
  """
 
32
  kwargs = {
33
  "max_length": 256,
34
  "do_sample": True,
 
44
 
45
  def chat_logic(user_input: str, chat_history: list):
46
  """
47
+ Build the conversation prompt from the history, decide which model responds,
48
+ and update the conversation.
 
49
  """
50
  if chat_history is None:
51
  chat_history = []
 
68
  else:
69
  selected_model = random.choice(["Llama", "Qwen"])
70
 
71
+ # Construct the conversation prompt.
72
  prompt = ""
73
  for speaker, message in chat_history:
74
  prompt += f"{speaker}: {message}\n"
 
83
 
84
  ### Gradio Interface ###
85
  with gr.Blocks() as demo:
86
+ gr.Markdown("# Group Chat: Meta-Llama Vision and Qwen Models")
87
  gr.Markdown(
88
+ "This demo allows two models to converse with each other. "
89
+ "Leave the textbox blank to let the models continue the conversation automatically, "
90
  "or type a message to interject (a random model will then respond)."
91
  )
92
 
 
104
  # Maintain the conversation history in Gradio's state.
105
  state = gr.State([])
106
 
107
+ # Update the chat when the send button is clicked.
108
  send_btn.click(
109
  fn=chat_logic,
110
  inputs=[user_message, state],