Bertug1911 committed on
Commit 1263291 · verified · 1 Parent(s): 46bbfe3

Update app.py

Files changed (1)
  1. app.py +25 -12
app.py CHANGED
@@ -1,5 +1,6 @@
 import subprocess
 import sys
+import threading
 
 def install_and_import(package):
     try:
@@ -23,18 +24,29 @@ model_name = "Bertug1911/BrtGPT-124m-Base"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+generation_lock = threading.Lock()
+
 def generate_text(prompt, temperature, top_k, max_new_tokens):
-    inputs = tokenizer(prompt, return_tensors="pt")
-    output = model.generate(
-        **inputs,
-        max_new_tokens=int(max_new_tokens),
-        temperature=float(temperature),
-        top_k=int(top_k),
-        do_sample=True,
-    )
-    generated_text = tokenizer.decode(output[0], skip_special_tokens=False)
-    generated_text = generated_text.replace(" ", "").replace("Ġ", " ")
-    return generated_text
+    if not prompt.strip():
+        return "Error: Prompt cannot be empty."
+
+    if not generation_lock.acquire(blocking=False):
+        return "Error: Generation is still running, please wait."
+
+    try:
+        inputs = tokenizer(prompt, return_tensors="pt")
+        output = model.generate(
+            **inputs,
+            max_new_tokens=int(max_new_tokens),
+            temperature=float(temperature),
+            top_k=int(top_k),
+            do_sample=True,
+        )
+        generated_text = tokenizer.decode(output[0], skip_special_tokens=False)
+        generated_text = generated_text.replace(" ", "").replace("Ġ", " ")
+        return generated_text
+    finally:
+        generation_lock.release()
 
 arayuz = gr.Interface(
     fn=generate_text,
@@ -48,7 +60,8 @@ arayuz = gr.Interface(
     title="BrtGPT-124m-Base",
     description="""
    Adjust the parameters, select the model, and generate text. (0.7 Temp and Top-k = 10 is good for CREATIVITY; 0.1/0.15 Temp and Top-k = 1-5 is good for ACCURACY.)
-    Model Page: 'https://huggingface.co/Bertug1911/BrtGPT-124m-Base' (If you download or like the model, I will be very happy! And don't forget to check out our COMMUNITY on the model page!) """
+    Model Page: 'https://huggingface.co/Bertug1911/BrtGPT-124m-Base' (If you download or like the model, I will be very happy! And don't forget to check out our COMMUNITY on the model page!) NOTE: If the screen gets stuck at generating, reload the page. Also, if performance drops as the number of users grows, we will improve the infrastructure (the GPU type and count will be increased) and optimize the system.
+    If you think the system is currently too slow, contact us via the email at the bottom of the model page and we will update it. Don't worry! """
 )
 
 arayuz.launch()
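
The updated generate_text guards the model with a non-blocking threading.Lock: a request that arrives while another is still generating returns an error message immediately instead of waiting behind it. A minimal sketch of that pattern outside Gradio (the slow_task function, the sleep timing, and the thread setup are made up for illustration, not part of app.py):

import threading
import time

generation_lock = threading.Lock()

def slow_task(name):
    # Try to take the lock without waiting; refuse the request if another call holds it.
    if not generation_lock.acquire(blocking=False):
        return f"{name}: busy, please wait"
    try:
        time.sleep(1)  # stand-in for model.generate(...)
        return f"{name}: done"
    finally:
        # Release even if the work above raises, so the app cannot deadlock.
        generation_lock.release()

results = []
threads = [threading.Thread(target=lambda n=n: results.append(slow_task(n))) for n in ("A", "B")]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)  # typically one "done" and one "busy", in either order

Gradio also ships a request queue (calling .queue() on the interface before launch()), which would hold concurrent requests instead of rejecting them; the lock approach keeps the reply immediate at the cost of asking the second user to retry.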
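Unchanged by this commit but easy to miss: after decoding, generate_text strips the spaces that separate tokens and then turns each "Ġ" marker (the byte-level BPE symbol for a leading space, as in GPT-2-style tokenizers) back into a real space. A tiny illustration of what those two replace() calls do, using a made-up decoded string rather than real model output:

# Hypothetical decode() output: tokens separated by spaces, "Ġ" marking word boundaries.
decoded = "Hel lo ĠBrt GPT Ġwrites Ġtext"
cleaned = decoded.replace(" ", "").replace("Ġ", " ")
print(cleaned)  # -> "Hello BrtGPT writes text"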