AryanRathod3097 committed on
Commit
04fe410
·
verified ·
1 Parent(s): e189ab7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -67
app.py CHANGED
@@ -1,6 +1,8 @@
1
  """
2
- CodeNyx – zero-config Gradio chatbot
3
- Auto-handles Hugging Face tokens without stdin prompts.
 
 
4
  """
5
 
6
  import os
@@ -15,40 +17,26 @@ from transformers import (
15
  from huggingface_hub import login
16
  from threading import Thread
17
 
18
- MODEL_ID = "bigcode/starcoder2-3b-instruct"
 
 
19
  BOT_NAME = "CodeNyx"
20
- SYSTEM_PROMPT = (f"You are {BOT_NAME}, an expert open-source coding assistant. "
21
- "Always provide concise, runnable code snippets with short explanations.")
22
-
 
 
23
  MAX_NEW_TOK = 1024
24
  TEMPERATURE = 0.2
25
  TOP_P = 0.9
26
 
27
  # ------------------------------------------------------------------
28
- # 1. Token helper (Gradio popup instead of stdin)
29
- # ------------------------------------------------------------------
30
- def ensure_token(token_value):
31
- """
32
- token_value comes from the Gradio UI the first time.
33
- We cache it in environment variable HF_TOKEN and login once.
34
- Returns True on success.
35
- """
36
- token = token_value.strip()
37
- if not token:
38
- return False
39
- os.environ["HF_TOKEN"] = token
40
- login(token)
41
- return True
42
-
43
- # ------------------------------------------------------------------
44
- # 2. Lazy model loader (once token is ready)
45
  # ------------------------------------------------------------------
46
- model, tokenizer = None, None
47
-
48
- def load_model():
49
- global model, tokenizer
50
- if model is not None:
51
- return True # already loaded
52
 
53
  bnb_config = BitsAndBytesConfig(
54
  load_in_4bit=True,
@@ -59,7 +47,7 @@ def load_model():
59
 
60
  tokenizer = AutoTokenizer.from_pretrained(
61
  MODEL_ID,
62
- use_auth_token=os.getenv("HF_TOKEN"),
63
  trust_remote_code=True,
64
  )
65
  if tokenizer.pad_token is None:
@@ -69,13 +57,13 @@ def load_model():
69
  MODEL_ID,
70
  quantization_config=bnb_config,
71
  device_map="auto",
72
- use_auth_token=os.getenv("HF_TOKEN"),
73
  trust_remote_code=True,
74
  )
75
- return True
76
 
77
  # ------------------------------------------------------------------
78
- # 3. Chat logic
79
  # ------------------------------------------------------------------
80
  def build_prompt(history, user_input):
81
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
@@ -118,48 +106,50 @@ def bot_turn(history):
118
  yield history
119
 
120
  # ------------------------------------------------------------------
121
- # 4. Gradio UI flow
122
  # ------------------------------------------------------------------
 
 
123
 with gr.Blocks(title=f"{BOT_NAME} – AI Pair-Programmer") as demo:
124
  gr.Markdown(f"""
125
 # 🤖 {BOT_NAME} – AI Pair-Programmer
126
- *3 B params, 100 % free-tier friendly.*
127
- Paste your Hugging Face token **once** if asked, then chat away.
128
  """)
129
 
130
- token_box = gr.Textbox(label="🤗 Hugging Face Token (only first time)", type="password")
131
- load_btn = gr.Button("Authorize")
132
- status_lbl = gr.Label(value="Waiting for token …")
133
-
134
- # --- main chat controls (hidden until token OK) ---
135
- with gr.Column(visible=False) as chat_col:
136
- chatbot = gr.Chatbot(height=450)
137
- with gr.Row():
138
- msg = gr.Textbox(
139
- placeholder="Ask me to write / debug / explain code …",
140
- lines=2,
141
- scale=8,
142
- show_label=False,
143
- container=False,
144
- )
145
- send_btn = gr.Button("Send", scale=1, variant="primary")
146
- clear_btn = gr.Button("🗑️ Clear")
147
 
148
  # ------------------------------------------------------------------
149
- # 5. Event wiring
150
  # ------------------------------------------------------------------
151
- def _auth(token):
152
- ok = ensure_token(token)
153
- if ok:
154
- try:
155
- load_model()
156
- return gr.update(visible=False), gr.update(visible=True), "✅ Ready! Start coding."
157
- except Exception as e:
158
- return gr.update(visible=True), gr.update(visible=False), f"❌ Error: {e}"
159
- else:
160
- return gr.update(visible=True), gr.update(visible=False), "❌ Invalid token."
161
-
162
- load_btn.click(_auth, token_box, [token_box, chat_col, status_lbl])
163
 
164
  def _send(user_msg, hist):
165
  return user_turn(user_msg, hist)
@@ -171,4 +161,8 @@ with gr.Blocks(title=f"{BOT_NAME} – AI Pair-Programmer") as demo:
171
  send_btn.click(_send, [msg, chatbot], [msg, chatbot]).then(_bot, chatbot, chatbot)
172
  clear_btn.click(lambda: None, None, chatbot)
173
 
174
- demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)
 
 
 
 
 
1
  """
2
+ CodeNyx – HF-Space-ready chatbot
3
+ - 3 B parameters
4
+ - 4-bit quant → < 8 GB VRAM
5
+ - Accepts HF token only for gated models (none required here)
6
  """
7
 
8
  import os
 
17
  from huggingface_hub import login
18
  from threading import Thread
19
 
20
+ # ------------------------------------------------------------------
21
+ # 1. Configuration
22
+ # ------------------------------------------------------------------
23
  BOT_NAME = "CodeNyx"
24
+ MODEL_ID = "bigcode/starcoder2-3b" # <- public, no token needed
25
+ SYSTEM_PROMPT = (
26
+ f"You are {BOT_NAME}, an expert open-source coding assistant. "
27
+ "Always provide concise, runnable code snippets with short explanations."
28
+ )
29
  MAX_NEW_TOK = 1024
30
  TEMPERATURE = 0.2
31
  TOP_P = 0.9
32
 
33
  # ------------------------------------------------------------------
34
+ # 2. Lightweight 4-bit loader
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  # ------------------------------------------------------------------
36
+ def load_model(token: str = None):
37
+ """Return (tokenizer, model) or raise."""
38
+ if token:
39
+ login(token) # only if provided
 
 
40
 
41
  bnb_config = BitsAndBytesConfig(
42
  load_in_4bit=True,
 
47
 
48
  tokenizer = AutoTokenizer.from_pretrained(
49
  MODEL_ID,
50
+ use_auth_token=token or None,
51
  trust_remote_code=True,
52
  )
53
  if tokenizer.pad_token is None:
 
57
  MODEL_ID,
58
  quantization_config=bnb_config,
59
  device_map="auto",
60
+ use_auth_token=token or None,
61
  trust_remote_code=True,
62
  )
63
+ return tokenizer, model
64
 
65
  # ------------------------------------------------------------------
66
+ # 3. Chat helpers
67
  # ------------------------------------------------------------------
68
  def build_prompt(history, user_input):
69
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
 
106
  yield history
107
 
108
  # ------------------------------------------------------------------
109
+ # 4. Gradio UI
110
  # ------------------------------------------------------------------
111
+ tokenizer, model = None, None # lazy load
112
+
113
 with gr.Blocks(title=f"{BOT_NAME} – AI Pair-Programmer") as demo:
114
  gr.Markdown(f"""
115
 # 🤖 {BOT_NAME} – AI Pair-Programmer
116
+ *Public model — no token needed.*
117
+ Ask any coding question and get **runnable code + short explanations**.
118
  """)
119
 
120
+ # Optional token box (for future gated models)
121
+ token_box = gr.Textbox(label="🤗 HF Token (optional)", type="password", visible=False)
122
+ load_btn = gr.Button("Load / Reload", visible=False)
123
+ status_lbl = gr.Label(value="Loading model …")
124
+
125
+ # Chat area
126
+ chatbot = gr.Chatbot(height=450)
127
+ with gr.Row():
128
+ msg = gr.Textbox(
129
+ placeholder="Ask me to write, debug, or explain code …",
130
+ lines=2,
131
+ scale=8,
132
+ show_label=False,
133
+ container=False,
134
+ )
135
+ send_btn = gr.Button("Send", scale=1, variant="primary")
136
+ clear_btn = gr.Button("🗑️ Clear")
137
 
138
  # ------------------------------------------------------------------
139
+ # 5. Events
140
  # ------------------------------------------------------------------
141
+ def _load(token):
142
+ global tokenizer, model
143
+ try:
144
+ tokenizer, model = load_model(token if token.strip() else None)
145
+ return "✅ Model loaded!"
146
+ except Exception as e:
147
+ return f"❌ {e}"
148
+
149
+ load_btn.click(_load, token_box, status_lbl)
150
+
151
+ # auto-load once on start
152
+ demo.load(_load, token_box, status_lbl)
153
 
154
  def _send(user_msg, hist):
155
  return user_turn(user_msg, hist)
 
161
  send_btn.click(_send, [msg, chatbot], [msg, chatbot]).then(_bot, chatbot, chatbot)
162
  clear_btn.click(lambda: None, None, chatbot)
163
 
164
+ # ------------------------------------------------------------------
165
+ # 6. Launch
166
+ # ------------------------------------------------------------------
167
+ if __name__ == "__main__":
168
+ demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True)