Bertug1911 committed on
Commit 74035a5 · verified · 1 Parent(s): 8d72cba

Update app.py

Files changed (1)
  1. app.py +12 -25
app.py CHANGED
@@ -1,6 +1,5 @@
 import subprocess
 import sys
-import threading
 
 def install_and_import(package):
     try:
@@ -24,29 +23,18 @@ model_name = "Bertug1911/BrtGPT-124m-Base"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-generation_lock = threading.Lock()
-
 def generate_text(prompt, temperature, top_k, max_new_tokens):
-    if not prompt.strip():
-        return "Error: Prompt cannot be empty."
-
-    if not generation_lock.acquire(blocking=False):
-        return "Error: Generation is still running, please wait."
-
-    try:
-        inputs = tokenizer(prompt, return_tensors="pt")
-        output = model.generate(
-            **inputs,
-            max_new_tokens=int(max_new_tokens),
-            temperature=float(temperature),
-            top_k=int(top_k),
-            do_sample=True,
-        )
-        generated_text = tokenizer.decode(output[0], skip_special_tokens=False)
-        generated_text = generated_text.replace(" ", "").replace("Ġ", " ")
-        return generated_text
-    finally:
-        generation_lock.release()
+    inputs = tokenizer(prompt, return_tensors="pt")
+    output = model.generate(
+        **inputs,
+        max_new_tokens=int(max_new_tokens),
+        temperature=float(temperature),
+        top_k=int(top_k),
+        do_sample=True,
+    )
+    generated_text = tokenizer.decode(output[0], skip_special_tokens=False)
+    generated_text = generated_text.replace(" ", "").replace("Ġ", " ")
+    return generated_text
 
 arayuz = gr.Interface(
     fn=generate_text,
@@ -62,8 +50,7 @@ arayuz = gr.Interface(
     Adjust the parameters, select the model, and generate text. (0.7 Temp and Top-k = 10 is good for CREATIVITY, 0.1/0.15 Temp. and Top-k = 1-5 is good for ACCURACY.
     Model Page: 'https://huggingface.co/Bertug1911/BrtGPT-124m-Base' (And if you download or like the model I be happy so much!, And don't forget look at our COMMUNITY from model page!), NOTE: If screen stuck at generating, reload the page. Also, if the performance decreases as the number of users increases,
     we will improve the infrastructure (increase the type and number of GPUs) and optimize the system. If you think the system is currently too slow,
-    contact us at the email below the model page (or the one I mentioned below), we will update it! Don't worry! CONTACT E-MAIL: "bertugscpmail@gmail.com or bertug2099@gmail.com"""
+    contact us at the email below the model page (or the one I mentioned below), we will update it! Don't worry! CONTACT E-MAIL: bertugscpmail@gmail.com or bertug2099@gmail.com"""
 )
 
 arayuz.launch()
-
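
For reference, the interface description in the diff recommends two sampling presets: roughly temperature 0.7 with top-k 10 for creative output, and temperature 0.1-0.15 with top-k 1-5 for accuracy. The following is a minimal sketch of how the reworked generate_text could be exercised with those presets; it assumes tokenizer, model, and generate_text are already defined as in app.py above, and the prompts are purely illustrative, not part of the commit.

    # Illustrative smoke test only; assumes the objects from app.py are in scope.
    creative = generate_text(
        prompt="Once upon a time",          # hypothetical prompt, not from the commit
        temperature=0.7,                    # "good for CREATIVITY" per the description
        top_k=10,
        max_new_tokens=50,
    )

    accurate = generate_text(
        prompt="The capital of France is",  # hypothetical prompt, not from the commit
        temperature=0.15,                   # "good for ACCURACY" per the description
        top_k=3,
        max_new_tokens=20,
    )

    print(creative)
    print(accurate)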