freemt committed on
Commit 97ad6b1 · 1 Parent(s): 094f795
Files changed (2)
  1. app.py +21 -8
  2. gradio_queue.db +0 -0
app.py CHANGED
@@ -2,7 +2,9 @@
 import gradio as gr
 import base64
 import io
+from logzero import logger
 from PIL import Image # opencv-python
+from random import choice
 
 # from PIL import Image
 # from transformers import AutoTokenizer, AutoModelForSeq2SeqLM,pipeline
@@ -12,9 +14,11 @@ from PIL import Image # opencv-python
 image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
 
 
-def generate_images(phrase: str):
+def generate_images(phrase: str, steps: int = 125):
+    if not phrase.strip():
+        phrase = choice(["an apple", "a cat", "blue moon", "metaverse"])
     generated_text = phrase
-    steps = 125
+    # steps = 125
     width = 256
     height = 256
     num_images = 4
@@ -27,29 +31,38 @@ def generate_images(phrase: str):
     # generated_images = []
 
     img = None
+    err_msg = phrase
     for image in image_bytes[1]:
         image_str = image[0]
         try:
            image_str = image_str.replace("data:image/png;base64,", "")
         except Exception as exc:
             logger.error(exc)
-            return None
+            err_msg = str(exc)
+            return None, f"Error: {err_msg}. Try again."
         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
         img = Image.open(io.BytesIO(decoded_bytes))
 
         # generated_images.append(img)
 
     # return generated_images
-    return img
 
+    return img, err_msg
 
-examples = ["an apple", "Donald Trump"]
+
+examples = [["an apple", 125], ["Donald Trump", 125]]
+
+inputs = [
+    # "text",
+    gr.Text(value="a dog with a funny hat"),
+    gr.Slider(minimum=2, maximum=500, value=115, step=5),
+]
 
 iface = gr.Interface(
     generate_images,
-    "text",
-    "image",
+    inputs,
+    ["image", gr.Text(value="", label="phrase")],
     examples=examples,
 )
 
-iface.launch()
+iface.launch(enable_queue=True)
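For reference, a minimal standalone sketch (not part of the commit) of the base64 decoding step that generate_images applies to each image returned by the loaded latentdiffusion Space; the helper name data_url_to_image is illustrative only.

import base64
import io

from PIL import Image


def data_url_to_image(image_str: str) -> Image.Image:
    # Illustrative helper, not defined anywhere in app.py.
    # Strip the data-URL prefix, decode the base64 payload, and open the
    # resulting bytes as a PIL image via an in-memory buffer.
    image_str = image_str.replace("data:image/png;base64,", "")
    decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
    return Image.open(io.BytesIO(decoded_bytes))

In app.py, each element of image_bytes[1] carries such a data-URL string as its first item, which is why the loop reads image[0] before decoding.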
gradio_queue.db ADDED
Binary file (209 kB).