mikeee committed
Commit 9bbba8c · 1 Parent(s): 75ece5a

Update app.py

Files changed (1)
  1. app.py +31 -8
app.py CHANGED
@@ -1,16 +1,39 @@
+"""See https://huggingface.co/fusing/latent-diffusion-text2im-large."""
+import gradio as gr
+import PIL
 from diffusers import DiffusionPipeline
 
 ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")
 
 generator = torch.manual_seed(42)
 
-prompt = "A painting of a squirrel eating a burger"
-image = ldm([prompt], generator=generator, eta=0.3, guidance_scale=6.0, num_inference_steps=50)
+examples = ["A street sign that reads Huggingface", "A painting of a squirrel eating a burger"]
+
+prompt_ = "A painting of a squirrel eating a burger"
+
+def fn(prompt=prompt_):
+    image = ldm(
+        [prompt],
+        generator=generator,
+        eta=0.3,
+        guidance_scale=6.0,
+        num_inference_steps=50,
+    )
 
-image_processed = image.cpu().permute(0, 2, 3, 1)
-image_processed = image_processed * 255.
-image_processed = image_processed.numpy().astype(np.uint8)
-image_pil = PIL.Image.fromarray(image_processed[0])
+    image_processed = image.cpu().permute(0, 2, 3, 1)
+    image_processed = image_processed * 255.
+    image_processed = image_processed.numpy().astype(np.uint8)
+    image_pil = PIL.Image.fromarray(image_processed[0])
 
-# save image
-image_pil.save("test.png")
+    # save image
+    # image_pil.save("test.png")
+    return image_pil
+
+iface = gr.Interface(
+    fn=fn,
+    inputs="text",
+    outputs="image",
+    examples=examples,
+    live=True,
+)
+iface.launch()
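
Note that even after this change, app.py still calls torch.manual_seed(42) and np.uint8 without importing torch or numpy; the commit only adds gradio and PIL. A minimal sketch of the import block the new file would additionally need, assuming the standard package names:

# Hypothetical import header for the new app.py; the committed file only
# imports gradio, PIL and diffusers, yet references torch and np below.
import gradio as gr
import numpy as np  # used via .astype(np.uint8)
import PIL
import torch        # used via torch.manual_seed(42)

from diffusers import DiffusionPipeline

With those two extra imports, the rest of the script should run as written, provided the installed diffusers version still returns the image batch as a torch tensor that .cpu().permute(0, 2, 3, 1) expects.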