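# Generate a turntable GIF with Shap-E for each prompt in the list below and
# upload every GIF to a Hugging Face dataset repository.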
import torch

from huggingface_hub import HfApi

from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

api = HfApi()

# Fall back to CPU when no GPU is available (generation will be much slower).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

batch_size = 1
guidance_scale = 15.0

torch.manual_seed(0)

repo = "openai/shap-e" |
|
pipe = ShapEPipeline.from_pretrained(repo) |
|
pipe = pipe.to(device) |
|
|
|
generator = torch.Generator(device="cuda").manual_seed(0) |
|
|
|
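# Prompts to render; each entry produces its own GIF.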
prompts = [
    "A chair that looks like an avocado",
    "An airplane that looks like a banana",
    "A spaceship",
    "A birthday cupcake",
    "A chair that looks like a tree",
    "A green boot",
    "A penguin",
    "Ube ice cream cone",
    "A bowl of vegetables",
]

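# Run the pipeline once per prompt, save the rendered frames as a GIF, and
# push the file to the Hub.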
for prompt in prompts:
    images = pipe(
        prompt,
        num_images_per_prompt=batch_size,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=64,
        frame_size=256,
        output_type="pil",
    ).images

    # `images[0]` holds the rendered frames for this prompt's single image.
    path = f"/home/patrick/images/{'_'.join(prompt.split())}.gif"
    export_to_gif(images[0], path)

    api.upload_file(
        path_or_fileobj=path,
        path_in_repo=path.split("/")[-1],
        repo_id="patrickvonplaten/images",
        repo_type="dataset",
    )
    print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{path.split('/')[-1]}")