Spaces:
Runtime error
Runtime error
File size: 2,010 Bytes
9a87834 9bbba8c 094f795 97ad6b1 094f795 97ad6b1 75ece5a 9a87834 75ece5a 9a87834 75ece5a 97ad6b1 094f795 97ad6b1 094f795 4dc9e2b 094f795 97ad6b1 094f795 97ad6b1 094f795 97ad6b1 094f795 97ad6b1 094f795 97ad6b1 8ddf994 094f795 97ad6b1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
"""See https://huggingface.co/spaces/Gradio-Blocks/Story-to-video/blob/main/app.py."""
import gradio as gr
import base64
import io
from logzero import logger
from PIL import Image # opencv-python
from random import choice
# from PIL import Image
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM,pipeline
# import requests
# import torch
# Proxy onto the public latent-diffusion Space; calling image_gen(...) runs
# inference remotely (network call) and returns that Space's outputs.
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
def generate_images(phrase: str, steps: int = 125):
    """Generate an image for *phrase* via the remote latentdiffusion Space.

    Args:
        phrase: Text prompt; a random fallback prompt is used when blank.
        steps: Number of diffusion steps forwarded to the Space.

    Returns:
        tuple[PIL.Image.Image | None, str]: the last decoded image (or
        ``None`` on failure) and a status/error message.
    """
    if not phrase.strip():
        # Empty prompt: pick a random default so the Space gets real input.
        phrase = choice(["an apple", "a cat", "blue moon", "metaverse"])
    width = 256
    height = 256
    num_images = 1  # was assigned 4 then immediately overwritten; keep the effective value
    diversity = 6
    try:
        image_bytes = image_gen(phrase, steps, width, height, num_images, diversity)
    except Exception as exc:
        logger.error(exc)
        # Bug fix: the original returned the not-yet-defined name `img` here
        # (NameError shadowing the real error) and its f-string contained the
        # literal text "str(exc)" instead of interpolating the exception.
        return None, f"phrase: {phrase}, errors: {exc}. Try again."
    # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
    img = None
    err_msg = phrase
    for image in image_bytes[1]:
        image_str = image[0]
        try:
            image_str = image_str.replace("data:image/png;base64,", "")
        except Exception as exc:
            logger.error(exc)
            err_msg = str(exc)
            return None, f"Error: {err_msg}. Try again."
        # The Space returns base64-encoded PNG data; decode into a PIL image.
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
    return img, err_msg
# Example prompts shown beneath the UI (prompt text, step count).
examples = [["an apple", 125], ["Donald Trump", 125]]

# Input widgets: a free-text prompt box and a diffusion-step slider.
inputs = [
    gr.Text(value="a dog with a funny hat"),
    gr.Slider(minimum=2, maximum=500, value=115, step=5),
]

# Output widgets: the generated image plus the echoed/err-annotated phrase.
outputs = [gr.Image(label=""), gr.Text(value="", label="phrase")]

iface = gr.Interface(
    generate_images,
    inputs,
    outputs,
    examples=examples,
)
iface.launch(enable_queue=True)
|