khanhamzawiser committed on
Commit
0199221
·
verified ·
1 Parent(s): 004e432

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -15
app.py CHANGED
@@ -5,28 +5,44 @@ from huggingface_hub import InferenceClient
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
7
  # Define how the chatbot responds
8
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    """Stream a chat completion from the zephyr-7b-beta model.

    Args:
        message: Latest user message.
        history: Prior conversation as (user, assistant) text pairs.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The cumulative response text after each streamed token, so the
        UI can render the reply incrementally.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    messages.append({"role": "user", "content": message})

    response = ""
    # Use a distinct loop variable: the original iterated with `message`,
    # shadowing the `message` parameter above.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on some stream events (e.g. role-only
        # deltas); guard to avoid `str + None` TypeError.
        if token:
            response += token
        yield response
30
 
31
  # Build the UI using Gradio Blocks
32
  with gr.Blocks() as demo:
 
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
7
  # Define how the chatbot responds
8
# Static demo data describing shop-floor equipment. `respond` serves this
# table directly for status-style questions instead of calling the LLM.
shop_floor_data = {
    "Machine A": {"Status": "Running", "Efficiency": "92%", "Output": "200 units/day"},
    "Machine B": {"Status": "Idle", "Efficiency": "N/A", "Output": "0 units/day"},
    "Assembly Line 1": {"Status": "Running", "Output": "450 units/day"},
    "Welding Robot": {"Status": "Under maintenance", "Next Check": "April 10, 2025"},
}
14
+
15
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Answer a chat message, preferring local shop-floor data over the LLM.

    If the message mentions status/machine/output, yield a formatted
    summary of `shop_floor_data`. Otherwise stream a completion from the
    zephyr-7b-beta model.

    Args:
        message: Latest user message.
        history: Prior conversation as (user, assistant) text pairs.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The cumulative response text (one yield for the data branch,
        incremental yields for the streamed LLM branch).
    """
    message_lower = message.lower()

    # Keyword-triggered branch: answer from the local data table.
    if "status" in message_lower or "machine" in message_lower or "output" in message_lower:
        response = "📊 **Shop Floor Status**:\n\n"
        for name, info in shop_floor_data.items():
            response += f"**{name}**\n"
            for key, value in info.items():
                response += f" - {key}: {value}\n"
            response += "\n"
        yield response
        return

    # Fallback to the LLM response
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    # BUG FIX: the committed version ended this path with a bare `response`
    # expression — a NameError at runtime and no output for non-status
    # questions. Restore the streaming completion loop.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on role-only stream events.
        if token:
            response += token
        yield response
 
 
 
 
 
 
 
 
 
46
 
47
  # Build the UI using Gradio Blocks
48
  with gr.Blocks() as demo: