coldn00dl3s committed on
Commit
b516523
·
verified ·
1 Parent(s): 8a8619b

updated description

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -36,7 +36,8 @@ def get_response_from_gemini(prompt: str, key) -> str:
36
  return response.text.strip()
37
 
38
  def get_response_from_deepseek(prompt: str, key) -> str:
39
- response = requests.post(
 
40
  url="https://openrouter.ai/api/v1/chat/completions",
41
  headers={"Authorization": f"Bearer {key}"},
42
  data=json.dumps({
@@ -181,8 +182,8 @@ st.markdown(
181
  unsafe_allow_html=True
182
  )
183
 
184
- st.title("Predicting Human Preference : Gemini vs DeepSeek")
185
- st.write("As part of this demo, we make use of two SOTA LLMs : [Gemini 2.5 Pro](https://deepmind.google/technologies/gemini/pro/) and [DeepSeek R1](https://api-docs.deepseek.com/news/news250120) and make them compete against each other on a given prompt (to be entered through the sidebar)")
186
  st.write("Using our proposed hybrid model, we predict which response is more suited to be preferred by a human user.")
187
 
188
  st.sidebar.title("Ask a Question!")
@@ -221,7 +222,7 @@ if st.sidebar.button("Generate Responses") and question:
221
  })
222
 
223
  if st.session_state["generated"]:
224
- tab1, tab2, tab3 = st.tabs(["Predictions","Model Architecture", "📈 Metric Curves"])
225
 
226
  with tab1:
227
  st.subheader("Model Responses")
 
36
  return response.text.strip()
37
 
38
  def get_response_from_deepseek(prompt: str, key) -> str:
39
+ response = requests.
40
+ post(
41
  url="https://openrouter.ai/api/v1/chat/completions",
42
  headers={"Authorization": f"Bearer {key}"},
43
  data=json.dumps({
 
182
  unsafe_allow_html=True
183
  )
184
 
185
+ st.title("Predicting Human Preference : LLM Battleground")
186
+ st.write("As part of this demo, we make use of any two of the following SOTA LLMs : [Gemini 2.5 Pro](https://deepmind.google/technologies/gemini/pro/), [DeepSeek R1](https://api-docs.deepseek.com/news/news250120), [Mistral Small 3.1](https://mistral.ai/news/mistral-small-3-1) and [LLaMa 4 Scout](https://ai.meta.com/blog/llama-4-multimodal-intelligence/) and make them compete against each other on a given prompt (to be entered through the sidebar)")
187
  st.write("Using our proposed hybrid model, we predict which response is more suited to be preferred by a human user.")
188
 
189
  st.sidebar.title("Ask a Question!")
 
222
  })
223
 
224
  if st.session_state["generated"]:
225
+ tab1, tab2, tab3 = st.tabs(["Predictions","Model Architecture", "Metric Curves"])
226
 
227
  with tab1:
228
  st.subheader("Model Responses")