# diffusers-test / app.py
# (Hugging Face Spaces page header: author freemt, commit 8b6cf81 "Update tenacity", 3.62 kB)
"""See https://huggingface.co/spaces/Gradio-Blocks/Story-to-video/blob/main/app.py."""
import base64
import io
import re
from random import choice
import gradio as gr
import translators as ts
from fastlid import fastlid
from logzero import logger
from PIL import Image # opencv-python
from tenacity import retry
from tenacity.stop import stop_after_attempt, stop_after_delay
# from PIL import Image
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM,pipeline
# import requests
# import torch
# Remote latent-diffusion Space loaded as a callable image-generation backend.
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
# Showcase prompts (Chinese and English). A random one is substituted when the
# user submits a blank phrase, and they also seed the Gradio `examples` below.
examples_ = [
"蓝色的夜,森林中好多萤火虫",
"黑云压城城欲摧 ,甲光向日金鳞开。",
"黄金在河里流淌,宝石遍地,空中铺满巨大的彩虹。",
"季姬寂,集鸡,鸡即棘鸡。棘鸡饥叽,季姬及箕稷济鸡。",
"an apple",
"a cat",
"blue moon",
"metaverse",
]
@retry(stop=(stop_after_delay(10) | stop_after_attempt(5)))
def tr_(text: str) -> str:
    """Translate *text*, trying several backends in order.

    Wraps [ts.deepl, ts.sogou, ts.baidu, ts.google] with tenacity: the whole
    fallback chain is retried for up to 10 s or 5 attempts.

    Side effect: records the backend that succeeded on ``tr_.api_used``
    (empty string when every backend failed) so callers can report it.

    Returns the translated text, or an apology message if all backends fail.
    """
    for tr in [ts.deepl, ts.sogou, ts.baidu, ts.google]:
        try:
            res = tr(text)
            logger.info(" api used: %s", tr.__name__)
            tr_.api_used = tr.__name__
            break
        except Exception:
            # best-effort: silently fall through to the next backend
            continue
    else:
        # Bug fix: previously ``tr_.api_used`` could be left unset here,
        # making the caller's attribute read raise AttributeError.
        tr_.api_used = ""
        # Bug fix: typo "wong" -> "wrong" in the fallback message.
        res = "Something is probably wrong, ping dev to fix it if you like."
    return res
def generate_images(phrase: str, steps: int = 125):
    """Generate one image from *phrase* via the remote latent-diffusion Space.

    Non-English phrases are first translated to English with ``tr_``
    (language detected by fastlid, with a heuristic override for short
    Chinese text). A blank phrase is replaced by a random showcase prompt.

    Args:
        phrase: prompt text, any language; blank picks a random example.
        steps: diffusion steps forwarded to the backend Space.

    Returns:
        (PIL.Image or None, status/error message) — a 2-tuple matching the
        two Gradio output components.
    """
    if not phrase.strip():
        phrase = choice(examples_)
    generated_text = phrase
    detected = "en"
    api_used = ""
    try:
        detected = fastlid(phrase)[0]
    except Exception as exc:
        logger.error(exc)
    # Safeguard: language detection is unreliable on very short Chinese phrases.
    if len(phrase) < 10 and re.search(r"[一-龟]+", phrase):
        detected = "zh"
    if detected not in ["en"]:
        try:
            generated_text = tr_(phrase)
            api_used = f"({tr_.api_used})"
        except Exception as exc:
            logger.error(exc)
            # Bug fix: dropped the spurious empty format spec ``{phrase:}``.
            return None, f"{phrase}, errors: {str(exc)}"
    width = 256
    height = 256
    # Bug fix: removed dead ``num_images = 4`` that was immediately overwritten.
    num_images = 1
    diversity = 6
    try:
        image_bytes = image_gen(
            generated_text, steps, width, height, num_images, diversity
        )
    except Exception as exc:
        logger.error(exc)
        return None, f"phrase: {phrase}, errors: {str(exc)}. Try again."
    # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py:
    # decode each base64 data-URI payload; with num_images == 1 the loop
    # leaves the single (last) decoded image in ``img``.
    img = None
    err_msg = f"{phrase} {api_used}"
    for image in image_bytes[1]:
        image_str = image[0]
        try:
            image_str = image_str.replace("data:image/png;base64,", "")
        except Exception as exc:
            logger.error(exc)
            err_msg = str(exc)
            return None, f"errors: {err_msg}. Try again."
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
    return img, err_msg
# Pair every showcase prompt with the default step count for the examples grid.
examples = [[prompt, 125] for prompt in examples_]

# Input widgets: free-text prompt plus a step-count slider.
inputs = [
    gr.Text(value="a dog with a funny hat"),
    gr.Slider(minimum=2, maximum=250, value=115, step=5),
]

# Outputs mirror generate_images' return tuple: the image and a status phrase.
outputs = [gr.Image(label=""), gr.Text(value="", label="phrase")]

iface = gr.Interface(
    generate_images,
    inputs,
    outputs,
    examples=examples,
)
iface.launch(enable_queue=True)