Svngoku committed
Commit 60aaf82 · verified · 1 Parent(s): d7ea2f7

Update app.py

Files changed (1)
  1. app.py +83 -221
app.py CHANGED
@@ -1,239 +1,101 @@
-import os
-from dotenv import load_dotenv
 import gradio as gr
-from proctor import (
-    CompositeTechnique,
-    RolePrompting,
-    ChainOfThought,
-    ChainOfVerification,
-    SelfAsk,
-    EmotionPrompting,
-    list_techniques,
-)
+import requests
 
-# Load environment variables (.env should contain OPENROUTER_API_KEY)
-load_dotenv()
-openrouter_key = os.environ.get("OPENROUTER_API_KEY")
+COINGECKO_API_BASE = "https://api.coingecko.com/api/v3"
 
-# Check API key
-if not openrouter_key:
-    raise RuntimeError(
-        "❌ OPENROUTER_API_KEY not set. Please set it in your .env file."
-    )
-
-# ----- Model Configs -----
-MODEL_CONFIGS = {
-    "gemini": {
-        "model": "openrouter/google/gemini-2.5-flash-preview-05-20",
-        "api_base": "https://openrouter.ai/api/v1",
-        "api_key": openrouter_key,
-        "temperature": 0.3,
-        "max_tokens": 1500,
-    },
-    "claude": {
-        "model": "openrouter/anthropic/claude-sonnet-4",
-        "api_base": "https://openrouter.ai/api/v1",
-        "api_key": openrouter_key,
-        "temperature": 0.7,
-        "max_tokens": 2000,
-    },
-    "deepseek": {
-        "model": "openrouter/deepseek/deepseek-r1-0528",
-        "api_base": "https://openrouter.ai/api/v1",
-        "api_key": openrouter_key,
-        "temperature": 0.6,
-        "max_tokens": 3000,
-    },
-    "llama": {
-        "model": "openrouter/meta-llama/llama-4-scout",
-        "api_base": "https://openrouter.ai/api/v1",
-        "api_key": openrouter_key,
-        "temperature": 0.6,
-        "max_tokens": 2500,
-    },
-    "mistral": {
-        "model": "openrouter/mistralai/mistral-small-3.1-24b-instruct",
-        "api_base": "https://openrouter.ai/api/v1",
-        "api_key": openrouter_key,
-        "temperature": 0.8,
-        "max_tokens": 1000,
-    },
-}
-
-# ----- Tool Functions -----
-
-def proctor_expert_cot(problem: str) -> dict:
-    """
-    Chain-of-Thought, Verification, and Role Prompting on Gemini.
-    """
-    technique = CompositeTechnique(
-        name="Expert Chain-of-Thought",
-        identifier="custom-expert-cot",
-        techniques=[
-            RolePrompting(),
-            ChainOfThought(),
-            ChainOfVerification(),
-        ],
-    )
-    response = technique.execute(
-        problem,
-        llm_config=MODEL_CONFIGS["gemini"],
-        role="Expert House Builder and Construction Manager"
-    )
-    return {
-        "model": "Google Gemini 2.5 Flash",
-        "technique": "Expert Chain-of-Thought",
-        "response": response
-    }
-
-def proctor_claude_cot(problem: str) -> dict:
-    """
-    Chain-of-Thought with Claude 4 Sonnet.
-    """
-    technique = ChainOfThought()
-    response = technique.execute(problem, llm_config=MODEL_CONFIGS["claude"])
-    return {
-        "model": "Claude 4 Sonnet",
-        "technique": "Chain-of-Thought",
-        "response": response
-    }
-
-def proctor_deepseek_reasoning(problem: str) -> dict:
-    """
-    Deep reasoning with DeepSeek R1: CoT, SelfAsk, Verification.
-    """
-    technique = CompositeTechnique(
-        name="Deep Reasoning Analysis",
-        identifier="deep-reasoning",
-        techniques=[
-            ChainOfThought(),
-            SelfAsk(),
-            ChainOfVerification(),
-        ],
-    )
-    response = technique.execute(problem, llm_config=MODEL_CONFIGS["deepseek"])
-    return {
-        "model": "DeepSeek R1",
-        "technique": "Deep Reasoning Analysis",
-        "response": response
-    }
-
-def proctor_llama_emotion(problem: str) -> dict:
+def get_current_price(coin_id: str = "bitcoin", vs_currency: str = "usd") -> dict:
     """
-    Emotion Prompting with Llama 4 Scout.
+    Get the current price of a cryptocurrency in a given currency.
+    Args:
+        coin_id (str): CoinGecko coin ID (e.g. 'bitcoin', 'ethereum')
+        vs_currency (str): Fiat currency (e.g. 'usd', 'eur')
+    Returns:
+        dict: Current price and other info, or error message.
     """
-    technique = EmotionPrompting()
-    response = technique.execute(
-        problem,
-        llm_config=MODEL_CONFIGS["llama"],
-        emotion="thoughtful and methodical"
-    )
-    return {
-        "model": "Llama 4 Scout",
-        "technique": "Emotion Prompting",
-        "response": response
-    }
+    url = f"{COINGECKO_API_BASE}/simple/price"
+    params = {"ids": coin_id, "vs_currencies": vs_currency, "include_market_cap": "true"}
+    resp = requests.get(url, params=params)
+    if resp.status_code == 200:
+        data = resp.json()
+        if coin_id in data:
+            return {
+                "coin": coin_id,
+                "currency": vs_currency,
+                "price": data[coin_id][vs_currency],
+                "market_cap": data[coin_id].get(f"{vs_currency}_market_cap"),
+            }
+        else:
+            return {"error": "Coin not found"}
+    else:
+        return {"error": f"Failed to fetch data ({resp.status_code})"}
 
-def proctor_mistral_tips(problem: str) -> dict:
+def get_market_info() -> dict:
     """
-    Fast Role Prompting with Mistral Small (for quick suggestions).
+    Get global cryptocurrency market information.
     """
-    technique = RolePrompting()
-    response = technique.execute(
-        problem,
-        llm_config=MODEL_CONFIGS["mistral"],
-        role="Construction Project Manager"
-    )
-    return {
-        "model": "Mistral Small 3.1 24B",
-        "technique": "Role Prompting",
-        "response": response
-    }
+    url = f"{COINGECKO_API_BASE}/global"
+    resp = requests.get(url)
+    if resp.status_code == 200:
+        data = resp.json()["data"]
+        return {
+            "active_cryptocurrencies": data["active_cryptocurrencies"],
+            "upcoming_icos": data["upcoming_icos"],
+            "ongoing_icos": data["ongoing_icos"],
+            "ended_icos": data["ended_icos"],
+            "markets": data["markets"],
+            "total_market_cap_usd": data["total_market_cap"]["usd"],
+            "market_cap_change_percentage_24h_usd": data["market_cap_change_percentage_24h_usd"],
+            "updated_at": data["updated_at"]
+        }
+    else:
+        return {"error": f"Failed to fetch data ({resp.status_code})"}
 
-# Optionally, expose a unified tool for arbitrary model/technique selection:
-def proctor_flexible(
-    problem: str,
-    model: str = "gemini",
-    technique: str = "ChainOfThought",
-    role: str = "",
-    emotion: str = ""
-) -> dict:
+def get_coin_history(coin_id: str = "bitcoin", date: str = "01-01-2023") -> dict:
     """
-    Flexible interface for any model/technique combo.
+    Get historical price data for a coin on a given date (dd-mm-yyyy).
+    Args:
+        coin_id (str): CoinGecko coin ID (e.g. 'bitcoin')
+        date (str): Date in format 'dd-mm-yyyy'
     """
-    technique_map = {
-        "ChainOfThought": ChainOfThought,
-        "RolePrompting": RolePrompting,
-        "EmotionPrompting": EmotionPrompting,
-        "SelfAsk": SelfAsk,
-        "ChainOfVerification": ChainOfVerification,
-    }
-    if technique == "CompositeExpert":
-        tech = CompositeTechnique(
-            name="Expert Chain-of-Thought",
-            identifier="custom-expert-cot",
-            techniques=[
-                RolePrompting(),
-                ChainOfThought(),
-                ChainOfVerification(),
-            ],
-        )
-        response = tech.execute(problem, llm_config=MODEL_CONFIGS[model], role=role)
-    elif technique == "DeepReasoning":
-        tech = CompositeTechnique(
-            name="Deep Reasoning Analysis",
-            identifier="deep-reasoning",
-            techniques=[
-                ChainOfThought(),
-                SelfAsk(),
-                ChainOfVerification(),
-            ],
-        )
-        response = tech.execute(problem, llm_config=MODEL_CONFIGS[model])
+    url = f"{COINGECKO_API_BASE}/coins/{coin_id}/history"
+    params = {"date": date}
+    resp = requests.get(url, params=params)
+    if resp.status_code == 200:
+        data = resp.json()
+        try:
+            price = data["market_data"]["current_price"]["usd"]
+        except Exception:
+            price = None
+        return {
+            "coin": coin_id,
+            "date": date,
+            "price_usd": price,
+            "snapshot": data.get("market_data", {})
+        }
     else:
-        tech_cls = technique_map.get(technique, ChainOfThought)
-        if technique == "RolePrompting":
-            response = tech_cls().execute(problem, llm_config=MODEL_CONFIGS[model], role=role)
-        elif technique == "EmotionPrompting":
-            response = tech_cls().execute(problem, llm_config=MODEL_CONFIGS[model], emotion=emotion)
-        else:
-            response = tech_cls().execute(problem, llm_config=MODEL_CONFIGS[model])
-    return {
-        "model": MODEL_CONFIGS[model]["model"],
-        "technique": technique,
-        "response": response
-    }
-
-# ----- Gradio/MCP Interface -----
+        return {"error": f"Failed to fetch data ({resp.status_code})"}
 
 with gr.Blocks() as demo:
-    gr.Markdown("# 🏗️ Proctor AI MCP Server\nAdvanced prompt engineering tools via OpenRouter and Proctor AI.\n\n*Try from an MCP-compatible client or the web UI below!*")
-    with gr.Tab("Gemini (Expert CoT)"):
-        gr.Interface(fn=proctor_expert_cot, inputs=gr.Textbox(label="Problem"), outputs=gr.JSON(), allow_flagging="never")
-    with gr.Tab("Claude 4 (Chain-of-Thought)"):
-        gr.Interface(fn=proctor_claude_cot, inputs=gr.Textbox(label="Problem"), outputs=gr.JSON(), allow_flagging="never")
-    with gr.Tab("DeepSeek R1 (Deep Reasoning)"):
-        gr.Interface(fn=proctor_deepseek_reasoning, inputs=gr.Textbox(label="Problem"), outputs=gr.JSON(), allow_flagging="never")
-    with gr.Tab("Llama 4 (Emotion Prompting)"):
-        gr.Interface(fn=proctor_llama_emotion, inputs=gr.Textbox(label="Problem"), outputs=gr.JSON(), allow_flagging="never")
-    with gr.Tab("Mistral (Quick Tips)"):
-        gr.Interface(fn=proctor_mistral_tips, inputs=gr.Textbox(label="Problem (tips request)"), outputs=gr.JSON(), allow_flagging="never")
-    with gr.Tab("Flexible (Advanced)"):
-        model_dropdown = gr.Dropdown(choices=list(MODEL_CONFIGS.keys()), value="gemini", label="Model")
-        technique_dropdown = gr.Dropdown(
-            choices=["ChainOfThought", "RolePrompting", "EmotionPrompting", "SelfAsk", "ChainOfVerification", "CompositeExpert", "DeepReasoning"],
-            value="ChainOfThought",
-            label="Technique"
+    gr.Markdown("# 🪙 CoinGecko MCP API\n\n**Live cryptocurrency data, globally accessible via Model Context Protocol (MCP)!**")
+    with gr.Tab("Get Current Price"):
+        gr.Interface(
+            fn=get_current_price,
+            inputs=[gr.Textbox(value="bitcoin", label="Coin ID (e.g. bitcoin, ethereum)"),
+                    gr.Textbox(value="usd", label="Fiat Currency (e.g. usd, eur)")],
+            outputs=gr.JSON(),
+            allow_flagging="never"
+        )
+    with gr.Tab("Global Market Info"):
+        gr.Interface(
+            fn=get_market_info,
+            inputs=[],
+            outputs=gr.JSON(),
+            allow_flagging="never"
         )
-        role_input = gr.Textbox(label="Role (optional)", value="")
-        emotion_input = gr.Textbox(label="Emotion (optional)", value="")
-        flexible_iface = gr.Interface(
-            fn=proctor_flexible,
-            inputs=[gr.Textbox(label="Problem"),
-                    model_dropdown,
-                    technique_dropdown,
-                    role_input,
-                    emotion_input],
+    with gr.Tab("Get Coin History"):
+        gr.Interface(
+            fn=get_coin_history,
+            inputs=[gr.Textbox(value="bitcoin", label="Coin ID"), gr.Textbox(value="01-01-2023", label="Date (dd-mm-yyyy)")],
             outputs=gr.JSON(),
             allow_flagging="never"
         )
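
Note: the captured hunk ends at the closing `)` of the last tab, with no `demo.launch()` in view. Gradio Spaces can auto-launch a top-level `demo`, but MCP serving has to be switched on explicitly, so a launch line along these lines is presumably present below the hunk (or would be needed). This is a sketch, not part of the commit; `mcp_server=True` and the `GRADIO_MCP_SERVER` variable are real Gradio features, their use here is an assumption:

# Assumed, not in the captured diff: expose the three functions as MCP tools.
# Requires the MCP extra: pip install "gradio[mcp]"
if __name__ == "__main__":
    demo.launch(mcp_server=True)  # or set GRADIO_MCP_SERVER=True in the Space settings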
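For a quick sanity check outside Gradio, the new tool functions can also be exercised directly. The import path below assumes the file is saved as app.py (hypothetical); the commented return shape mirrors the dict built in get_current_price, with values coming live from CoinGecko:

# Illustrative sketch: call the CoinGecko helpers without the UI.
from app import get_current_price, get_coin_history

print(get_current_price("ethereum", "eur"))
# e.g. {"coin": "ethereum", "currency": "eur", "price": ..., "market_cap": ...}

print(get_coin_history("bitcoin", "01-01-2023"))
# {"coin": "bitcoin", "date": "01-01-2023", "price_usd": ..., "snapshot": {...}}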