ysharma (HF Staff) committed
Commit 4024b96 · verified · Parent: ed7a037

Create app.py
Files changed (1):
  app.py (+52, -0)
app.py ADDED
@@ -0,0 +1,52 @@
+import gradio as gr
+from gradio import ChatMessage
+import time
+
+sleep_time = 0.5
+
+def simulate_thinking_chat(message, history):
+    start_time = time.time()
+    response = ChatMessage(
+        content="",
+        metadata={"title": "_Thinking_ step-by-step", "id": 0, "status": "pending"}
+    )
+    yield response
+
+    thoughts = [
+        "First, I need to understand the core aspects of the query...",
+        "Now, considering the broader context and implications...",
+        "Analyzing potential approaches to formulate a comprehensive answer...",
+        "Finally, structuring the response for clarity and completeness..."
+    ]
+
+    accumulated_thoughts = ""
+    for thought in thoughts:
+        time.sleep(sleep_time)
+        accumulated_thoughts += f"- {thought}\n\n"
+        response.content = accumulated_thoughts.strip()
+        yield response
+
+    response.metadata["status"] = "done"
+    response.metadata["duration"] = time.time() - start_time
+    yield response
+
+    time.sleep(5.0)
+    response = [
+        response,
+        ChatMessage(
+            content="Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer."
+        )
+    ]
+    yield response
+
+
+
+
+demo = gr.ChatInterface(
+    simulate_thinking_chat,
+    title="Thinking LLM Chat Interface 🤔",
+    type="messages",
+)
+
+if __name__ == "__main__":
+    demo.launch()
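
Note: the file above relies on gr.ChatMessage's metadata dict, which recent Gradio releases render as a collapsible "thought" panel inside gr.ChatInterface; the "status" and "duration" keys the demo sets once the loop finishes mark that panel as done and record how long the simulated thinking took. A stripped-down sketch of the same mechanism without the streaming loop is shown below (the function name and strings are illustrative, not part of this commit):

import gradio as gr
from gradio import ChatMessage

def echo_with_thought(message, history):
    # One already-finished "thought" panel, followed by the visible answer.
    return [
        ChatMessage(
            content=f"The user asked: {message!r}",
            metadata={"title": "_Thinking_", "status": "done"},
        ),
        ChatMessage(content=f"You said: {message}"),
    ]

if __name__ == "__main__":
    gr.ChatInterface(echo_with_thought, type="messages").launch()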