"""See https://huggingface.co/spaces/Gradio-Blocks/Story-to-video/blob/main/app.py."""
import base64
import io
import re
from random import choice
import gradio as gr
import translators as ts
from fastlid import fastlid
from logzero import logger
from PIL import Image  # pillow
from tenacity import retry
from tenacity.stop import stop_after_attempt, stop_after_delay
# from PIL import Image
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
# import requests
# import torch
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
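# NOTE (assumption): the multimodalart/latentdiffusion Space is called positionally
# below as image_gen(prompt, steps, width, height, num_images, diversity); that
# argument order and the base64-PNG return format are inferred from the parsing
# loop in generate_images, not from the remote Space's documentation.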
examples_ = [
    "蓝色的夜,森林中好多萤火虫",  # "A blue night, with many fireflies in the forest"
    "黑云压城城欲摧 ,甲光向日金鳞开。",  # "Dark clouds press on the city, about to crush it; armor glints toward the sun like golden scales"
    "黄金在河里流淌,宝石遍地,空中铺满巨大的彩虹。",  # "Gold flows in the river, gems cover the ground, and huge rainbows fill the sky"
    "季姬寂,集鸡,鸡即棘鸡。棘鸡饥叽,季姬及箕稷济鸡。",  # classical homophonic tongue-twister about Lady Ji Ji feeding her chickens
    "an apple",
    "a cat",
    "blue moon",
    "metaverse",
]
@retry(stop=(stop_after_delay(10) | stop_after_attempt(5)))
def tr_(text: str) -> str:
    """Wrap [ts.deepl, ts.sogou, ts.baidu, ts.google] with tenacity."""
    for tr in [ts.deepl, ts.sogou, ts.baidu, ts.google]:
        try:
            res = tr(text)
            logger.info(" api used: %s", tr.__name__)
            tr_.api_used = tr.__name__
            break
        except Exception:
            continue
    else:
        res = "Something is probably wrong, ping dev to fix it if you like."
    return res
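# Usage sketch for tr_ (illustrative only; the wording of the result depends on
# whichever translation backend answers first):
#     english = tr_("蓝色的夜,森林中好多萤火虫")  # e.g. "Blue night, many fireflies in the forest"
#     logger.info("translated via %s", tr_.api_used)
# tr_.api_used is set only after a successful call, so read it right after tr_ returns.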
def generate_images(phrase: str, steps: int = 125):
    """Generate an image for *phrase*, translating non-English input to English first."""
    if not phrase.strip():
        phrase = choice(examples_)
    generated_text = phrase
    detected = "en"
    api_used = ""
    try:
        detected = fastlid(phrase)[0]
    except Exception as exc:
        logger.error(exc)

    # safeguard: short Chinese phrases are sometimes misdetected
    if len(phrase) < 10 and re.search(r"[一-龟]+", phrase):
        detected = "zh"

    if detected not in ["en"]:
        try:
            generated_text = tr_(phrase)
            api_used = f"({tr_.api_used})"
        except Exception as exc:
            logger.error(exc)
            return None, f"phrase: {phrase}, errors: {str(exc)}"

    # steps = 125
    width = 256
    height = 256
    # num_images = 4
    num_images = 1  # only a single image is returned to the UI
    diversity = 6
    try:
        image_bytes = image_gen(
            generated_text, steps, width, height, num_images, diversity
        )
    except Exception as exc:
        logger.error(exc)
        return None, f"phrase: {phrase}, errors: {str(exc)}. Try again."

    # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
    # generated_images = []
    img = None
    err_msg = f"{phrase} {api_used}"
    for image in image_bytes[1]:
        image_str = image[0]
        try:
            image_str = image_str.replace("data:image/png;base64,", "")
        except Exception as exc:
            logger.error(exc)
            err_msg = str(exc)
            return None, f"errors: {err_msg}. Try again."
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        # generated_images.append(img)
    # return generated_images
    return img, err_msg
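# Direct-call sketch (assumes the remote latentdiffusion Space is reachable; the
# (PIL.Image, str) return pair mirrors the Gradio outputs declared below):
#     img, caption = generate_images("blue moon", steps=50)
#     if img is not None:
#         img.save("blue_moon.png")  # hypothetical output path
#     print(caption)  # phrase plus the translation API used, or an error message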
# examples = [["an apple", 125], ["Donald Trump", 125]]
examples = [list(_) for _ in zip(examples_, [125] * len(examples_))]
inputs = [
    # "text",
    gr.Text(value="a dog with a funny hat"),
    gr.Slider(minimum=2, maximum=250, value=115, step=5),
]
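# The slider feeds the `steps` argument of generate_images, so its default of 115
# takes precedence over the function's own default of 125 when run through the UI.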
iface = gr.Interface(
    generate_images,
    inputs,
    # ["image", gr.Text(value="", label="phrase")],
    [gr.Image(label=""), gr.Text(value="", label="phrase")],
    examples=examples,
)
iface.launch(enable_queue=True)