freemt committed
Commit 9a87834 · 1 Parent(s): ef2786d

Update multimodalart/latentdiffusion

.stignore ADDED
@@ -0,0 +1,101 @@
+ .git
+ # Byte-compiled / optimized / DLL files
+ __pycache__
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build
+ develop-eggs
+ dist
+ downloads
+ eggs
+ .eggs
+ lib
+ lib64
+ parts
+ sdist
+ var
+ wheels
+ pip-wheel-metadata
+ share/python-wheels
+ *.egg-info
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+
+ # Flask stuff:
+ instance
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build
+
+ # PyBuilder
+ target
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env
+ venv
+ ENV
+ env.bak
+ venv.bak
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mypy
+ .mypy_cache
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre
app-diffusers.py ADDED
@@ -0,0 +1,52 @@
+ """See https://huggingface.co/fusing/latent-diffusion-text2im-large."""
+ from logzero import logger
+ from install import install
+ try:
+     import gradio as gr
+ except ModuleNotFoundError:
+     try:
+         install("gradio")
+         import gradio as gr
+     except Exception as exc:
+         logger.error(exc)
+         raise SystemExit(1)
+
+ import numpy as np  # needed below for np.uint8
+ import torch  # needed below for torch.manual_seed
+ import PIL.Image
+ from diffusers import DiffusionPipeline
+
+ ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")
+
+ generator = torch.manual_seed(42)
+
+ examples = ["A street sign that reads Huggingface", "A painting of a squirrel eating a burger"]
+
+ prompt_ = "A painting of a squirrel eating a burger"
+
+ def fn(prompt=prompt_):
+     image = ldm(
+         [prompt],
+         generator=generator,
+         eta=0.3,
+         guidance_scale=6.0,
+         num_inference_steps=50,
+     )
+
+     image_processed = image.cpu().permute(0, 2, 3, 1)
+     image_processed = image_processed * 255.
+     image_processed = image_processed.numpy().astype(np.uint8)
+     image_pil = PIL.Image.fromarray(image_processed[0])
+
+     # save image
+     # image_pil.save("test.png")
+     return image_pil
+
+ iface = gr.Interface(
+     fn=fn,
+     inputs="text",
+     outputs="image",
+     examples=examples,
+     live=True,
+ )
+ iface.launch()
+
+ # gr.Interface.load("fusing/latent-diffusion-text2im-large", examples=examples).launch()
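A minimal usage sketch (not part of the commit; assumes the definitions above are in scope, e.g. before iface.launch() is called, and that the model download succeeds): fn() can be called directly to generate one image without going through the Gradio UI; the output filename is arbitrary.

img = fn("A street sign that reads Huggingface")  # hypothetical direct call, bypassing iface.launch()
img.save("street_sign.png")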
app.py CHANGED
@@ -1,41 +1,13 @@
- """See https://huggingface.co/fusing/latent-diffusion-text2im-large."""
  import gradio as gr
- import PIL
- from diffusers import DiffusionPipeline

- ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")

- generator = torch.manual_seed(42)

- examples = ["A street sign that reads Huggingface", "A painting of a squirrel eating a burger"]

- prompt_ = "A painting of a squirrel eating a burger"
-
- def fn(prompt=prompt_):
-     image = ldm(
-         [prompt],
-         generator=generator,
-         eta=0.3,
-         guidance_scale=6.0,
-         num_inference_steps=50,
-     )
-
-     image_processed = image.cpu().permute(0, 2, 3, 1)
-     image_processed = image_processed * 255.
-     image_processed = image_processed.numpy().astype(np.uint8)
-     image_pil = PIL.Image.fromarray(image_processed[0])
-
-     # save image
-     # image_pil.save("test.png")
-     return image_pil
-
- iface = gr.Interface(
-     fn=fn,
-     inputs="text",
-     outputs="image",
-     examples=examples,
-     live=True,
- )
- iface.launch()
-
- # gr.Interface.load("fusing/latent-diffusion-text2im-large", examples=examples).launch()
+ """See https://huggingface.co/spaces/Gradio-Blocks/Story-to-video/blob/main/app.py."""
  import gradio as gr

+ # from PIL import Image
+ # from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+ # import requests
+ # import torch

+ image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")

+ # image_bytes = image_gen(senten, steps, width, height, num_images, diversity)

+ image_gen.launch()
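For reference, a hedged sketch of driving the loaded Space from Python instead of through the browser UI, assuming the loaded interface can be called directly as the commented-out line above suggests; the argument order mirrors that comment (sentence, steps, width, height, num_images, diversity), and the concrete values are placeholders, not something this commit specifies.

import gradio as gr

image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
# positional arguments follow the commented-out call in app.py (an assumption, not pinned down here)
result = image_gen("A painting of a squirrel eating a burger", 45, 256, 256, 1, 5)
print(result)  # whatever the Space returns, e.g. generated image data or file paths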
example.py ADDED
@@ -0,0 +1,46 @@
+ import torch
+ from diffusers import UNetUnconditionalModel, DDIMScheduler
+ import PIL.Image
+ import numpy as np
+ import tqdm
+
+ torch_device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # 1. Load models
+ scheduler = DDIMScheduler.from_config("fusing/ddpm-celeba-hq", tensor_format="pt")
+ unet = UNetUnconditionalModel.from_pretrained("fusing/ddpm-celeba-hq", ddpm=True).to(torch_device)
+
+ # 2. Sample gaussian noise
+ generator = torch.manual_seed(23)
+ generator = torch.manual_seed(43)  # overrides the seed above
+ unet.image_size = unet.resolution
+ image = torch.randn(
+     (1, unet.in_channels, unet.image_size, unet.image_size),
+     generator=generator,
+ )
+ image = image.to(torch_device)
+
+ # 3. Denoise
+ num_inference_steps = 50
+ num_inference_steps = 5  # overrides the value above
+ eta = 0.0  # <- deterministic sampling
+ scheduler.set_timesteps(num_inference_steps)
+
+ for t in tqdm.tqdm(scheduler.timesteps):
+     # 1. predict noise residual
+     with torch.no_grad():
+         residual = unet(image, t)["sample"]
+
+     # 2. compute the previous noisy sample
+     prev_image = scheduler.step(residual, t, image, eta)["prev_sample"]
+
+     # 3. set current image to prev_image: x_t -> x_t-1
+     image = prev_image
+
+ # 4. process image to PIL
+ image_processed = image.cpu().permute(0, 2, 3, 1)
+ image_processed = (image_processed + 1.0) * 127.5
+ image_processed = image_processed.numpy().astype(np.uint8)
+ image_pil = PIL.Image.fromarray(image_processed[0])
+
+ # 5. save image
+ image_pil.save("generated_image.png")
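A hedged refactor sketch of the loop above (the function name and defaults are mine, not the commit's): with scheduler, unet, torch_device and the imports from example.py in scope, it produces one face image per seed using the same calls as the script.

def generate(seed=43, steps=5):
    # same calls as in example.py, wrapped for repeated use
    generator = torch.manual_seed(seed)
    scheduler.set_timesteps(steps)
    image = torch.randn(
        (1, unet.in_channels, unet.image_size, unet.image_size),
        generator=generator,
    ).to(torch_device)
    for t in tqdm.tqdm(scheduler.timesteps):
        with torch.no_grad():
            residual = unet(image, t)["sample"]
        image = scheduler.step(residual, t, image, 0.0)["prev_sample"]
    arr = ((image.cpu().permute(0, 2, 3, 1) + 1.0) * 127.5).numpy().astype(np.uint8)
    return PIL.Image.fromarray(arr[0])

generate(seed=7, steps=50).save("face_seed7.png")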
example1.py ADDED
@@ -0,0 +1,31 @@
+ """https://pypi.org/project/diffusers/.
+
+ !pip install git+https://github.com/patil-suraj/transformers@ldm-bert
+ !git clone https://github.com/huggingface/diffusers
+ !cd diffusers && pip install -e .
+ !pip install git+file:///content/diffusers
+ !pip install torch
+
+ https://github.com/CompVis/latent-diffusion/blob/main/scripts/txt2img.py
+
+ https://medium.com/tag/diffusion-models
+
+ !pip install einops
+
+ """
+ import numpy as np  # needed below for np.uint8
+ import torch  # needed below for torch.manual_seed
+ import PIL.Image  # needed below for PIL.Image.fromarray
+ from diffusers import DiffusionPipeline
+
+ ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")
+
+ generator = torch.manual_seed(42)
+
+ prompt = "A painting of a squirrel eating a burger"
+ image = ldm([prompt], generator=generator, eta=0.3, guidance_scale=6.0, num_inference_steps=50)
+
+ image_processed = image.cpu().permute(0, 2, 3, 1)
+ image_processed = image_processed * 255.
+ image_processed = image_processed.numpy().astype(np.uint8)
+ image_pil = PIL.Image.fromarray(image_processed[0])
+
+ # save image
+ image_pil.save("test.png")
generated_image.png ADDED
install-sw.sh ADDED
@@ -0,0 +1,23 @@
+ # pip install pipx
+ # pipx install poetry
+ # pipx ensurepath
+ # source ~/.bashrc
+
+ # curl -sSL https://install.python-poetry.org | python3 -
+ # -C- continue, -S show error, -o output
+ curl -sSL -C- -o install-poetry.py https://install.python-poetry.org
+ python install-poetry.py
+ rm install-poetry.py
+ echo 'export PATH=~/.local/bin:$PATH' >> ~/.bashrc  # append rather than overwrite ~/.bashrc
+ source ~/.bashrc
+ # ~/.local/bin/poetry install
+
+ wget -c https://deb.nodesource.com/setup_14.x
+ bash setup_14.x
+ apt-get install -y nodejs
+ npm install -g npm@latest
+ npm install -g nodemon
+ rm setup_14.x
+
+ # apt update  # already done by apt-get install -y nodejs
+ apt install byobu -y > /dev/null 2>&1
install-sw1.sh ADDED
@@ -0,0 +1,25 @@
+ # pip install pipx
+ # pipx install poetry
+ # pipx ensurepath
+ # source ~/.bashrc
+
+ # curl -sSL https://install.python-poetry.org | python3 -
+ # -C- continue, -S show error, -o output
+ curl -sSL -C- -o install-poetry.py https://install.python-poetry.org
+ python install-poetry.py
+ rm install-poetry.py
+ echo 'export PATH=~/.local/bin:$PATH' >> ~/.bashrc  # append rather than overwrite ~/.bashrc
+ source ~/.bashrc
+ # ~/.local/bin/poetry install
+
+ wget -c https://deb.nodesource.com/setup_12.x
+ bash setup_12.x
+ apt-get install -y nodejs
+ npm install -g npm@latest
+ npm install -g nodemon
+ rm setup_12.x
+
+ # apt update  # already done by apt-get install -y nodejs
+ apt install byobu -y > /dev/null 2>&1
+ byobu-enable
+ byobu
okteto-up.bat ADDED
@@ -0,0 +1 @@
+ okteto up
okteto.yml ADDED
@@ -0,0 +1,44 @@
+ name: gradio-cmat
+
+ # The build section defines how to build the images of
+ # your development environment
+ # More info: https://www.okteto.com/docs/reference/manifest/#build
+ # build:
+ #   my-service:
+ #     context: .
+
+ # The deploy section defines how to deploy your development environment
+ # More info: https://www.okteto.com/docs/reference/manifest/#deploy
+ # deploy:
+ #   commands:
+ #   - name: Deploy
+ #     command: echo 'Replace this line with the proper 'helm'
+
+ # or 'kubectl' commands to deploy your development environment'
+
+ # The dependencies section defines other git repositories to be
+ # deployed as part of your development environment
+ # More info: https://www.okteto.com/docs/reference/manifest/#dependencies
+ # dependencies:
+ #   - https://github.com/okteto/sample
+ # The dev section defines how to activate a development container
+ # More info: https://www.okteto.com/docs/reference/manifest/#dev
+ dev:
+   gradio-cmat:
+     # image: okteto/dev:latest
+     # image: python:3.8.13-bullseye
+     # image: simbachain/poetry-3.8
+     image: python:3.8
+     command: bash
+     workdir: /usr/src/app
+     sync:
+       - .:/usr/src/app
+     environment:
+       - name=$USER
+     forward:
+       - 7861:7861
+       - 7860:7860
+       - 8501:8501
+     reverse:
+       - 9000:9000
+     autocreate: true
poetry.lock ADDED
@@ -0,0 +1,45 @@
+ [[package]]
+ name = "colorama"
+ version = "0.4.5"
+ description = "Cross-platform colored terminal text."
+ category = "main"
+ optional = false
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+ [[package]]
+ name = "install"
+ version = "1.3.5"
+ description = "Install packages from within code"
+ category = "main"
+ optional = false
+ python-versions = ">=2.7, >=3.5"
+
+ [[package]]
+ name = "logzero"
+ version = "1.7.0"
+ description = "Robust and effective logging for Python 2 and 3"
+ category = "main"
+ optional = false
+ python-versions = "*"
+
+ [package.dependencies]
+ colorama = {version = "*", markers = "sys_platform == \"win32\""}
+
+ [metadata]
+ lock-version = "1.1"
+ python-versions = "^3.8"
+ content-hash = "99de593375f55736457752f5abe46a79ef864a037b1bd84592a3983d44b410c2"
+
+ [metadata.files]
+ colorama = [
+     {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
+     {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
+ ]
+ install = [
+     {file = "install-1.3.5-py3-none-any.whl", hash = "sha256:0d3fadf4aa62c95efe8d34757c8507eb46177f86c016c21c6551eafc6a53d5a9"},
+     {file = "install-1.3.5.tar.gz", hash = "sha256:e67c8a0be5ccf8cb4ffa17d090f3a61b6e820e6a7e21cd1d2c0f7bc59b18e647"},
+ ]
+ logzero = [
+     {file = "logzero-1.7.0-py2.py3-none-any.whl", hash = "sha256:23eb1f717a2736f9ab91ca0d43160fd2c996ad49ae6bad34652d47aba908769d"},
+     {file = "logzero-1.7.0.tar.gz", hash = "sha256:7f73ddd3ae393457236f081ffebd044a3aa2e423a47ae6ddb5179ab90d0ad082"},
+ ]
pyproject.toml ADDED
@@ -0,0 +1,16 @@
+ [tool.poetry]
+ name = "app"
+ version = "0.1.0"
+ description = ""
+ authors = ["Your Name <you@example.com>"]
+
+ [tool.poetry.dependencies]
+ python = "^3.8"
+ logzero = "^1.7.0"
+ install = "^1.3.5"
+
+ [tool.poetry.dev-dependencies]
+
+ [build-system]
+ requires = ["poetry-core>=1.0.0"]
+ build-backend = "poetry.core.masonry.api"
requirements.txt → requirements-diffusers.txt RENAMED
@@ -3,6 +3,7 @@
 
  # git+https://github.com/huggingface/diffusers.git@v0.0.2-release
  # diffusers
+
  git+https://github.com/huggingface/diffusers
 
  # https://github.com/huggingface/diffusers/issues/82
@@ -12,7 +13,13 @@ git+https://github.com/huggingface/diffusers
  # pip install git+file:///content/diffusers
  # pip install torch
 
- # git+https://github.com/patil-suraj/transformers.git@ldm-bert
+ https://hub.fastgit.xyz/author/repo
+ https://hub.fastgit.org/author/repo
+
+ git+https://github.com/patil-suraj/transformers.git@ldm-bert
+ torch
 
  opencv-python
- transformers
+ transformers
+ install
+ logzero