Spaces:
Running
on
Zero
Running
on
Zero
xinjie.wang
committed on
Commit
·
d1fdb19
1
Parent(s):
ff6cad0
update
Browse files
- app.py +6 -7
- embodied_gen/models/texture_model.py +1 -1
- embodied_gen/scripts/texture_gen.sh +1 -1
app.py
CHANGED
@@ -40,6 +40,8 @@ from common import (
|
|
40 |
)
|
41 |
|
42 |
with gr.Blocks(delete_cache=(43200, 43200), theme=custom_theme) as demo:
|
|
|
|
|
43 |
gr.Markdown(
|
44 |
"""
|
45 |
## ***EmbodiedGen***: Image-to-3D Asset
|
@@ -54,21 +56,18 @@ with gr.Blocks(delete_cache=(43200, 43200), theme=custom_theme) as demo:
|
|
54 |
<a href="https://github.com/HorizonRobotics/EmbodiedGen">
|
55 |
<img alt="💻 GitHub" src="https://img.shields.io/badge/GitHub-000000?logo=github">
|
56 |
</a>
|
57 |
-
<a href="https://www.youtube.com/watch?v=
|
58 |
<img alt="🎥 Video" src="https://img.shields.io/badge/🎥-Video-red">
|
59 |
</a>
|
60 |
</p>
|
61 |
|
62 |
🖼️ Generate physically plausible 3D asset from single input image.
|
63 |
-
|
64 |
""".format(
|
65 |
VERSION=VERSION
|
66 |
),
|
67 |
elem_classes=["header"],
|
68 |
)
|
69 |
|
70 |
-
gr.HTML(image_css)
|
71 |
-
# gr.HTML(lighting_css)
|
72 |
with gr.Row():
|
73 |
with gr.Column(scale=2):
|
74 |
with gr.Tabs() as input_tabs:
|
@@ -239,9 +238,8 @@ with gr.Blocks(delete_cache=(43200, 43200), theme=custom_theme) as demo:
|
|
239 |
)
|
240 |
|
241 |
gr.Markdown(
|
242 |
-
""" NOTE: If `Asset Attributes` are provided,
|
243 |
-
|
244 |
-
will be applied. \n
|
245 |
The `Download URDF` file is restored to the real scale and
|
246 |
has quality inspection, open with an editor to view details.
|
247 |
"""
|
@@ -279,6 +277,7 @@ with gr.Blocks(delete_cache=(43200, 43200), theme=custom_theme) as demo:
|
|
279 |
examples_per_page=10,
|
280 |
)
|
281 |
with gr.Column(scale=1):
|
|
|
282 |
video_output = gr.Video(
|
283 |
label="Generated 3D Asset",
|
284 |
autoplay=True,
|
|
|
40 |
)
|
41 |
|
42 |
with gr.Blocks(delete_cache=(43200, 43200), theme=custom_theme) as demo:
|
43 |
+
gr.HTML(image_css, visible=False)
|
44 |
+
# gr.HTML(lighting_css, visible=False)
|
45 |
gr.Markdown(
|
46 |
"""
|
47 |
## ***EmbodiedGen***: Image-to-3D Asset
|
|
|
56 |
<a href="https://github.com/HorizonRobotics/EmbodiedGen">
|
57 |
<img alt="💻 GitHub" src="https://img.shields.io/badge/GitHub-000000?logo=github">
|
58 |
</a>
|
59 |
+
<a href="https://www.youtube.com/watch?v=rG4odybuJRk">
|
60 |
<img alt="🎥 Video" src="https://img.shields.io/badge/🎥-Video-red">
|
61 |
</a>
|
62 |
</p>
|
63 |
|
64 |
🖼️ Generate physically plausible 3D asset from single input image.
|
|
|
65 |
""".format(
|
66 |
VERSION=VERSION
|
67 |
),
|
68 |
elem_classes=["header"],
|
69 |
)
|
70 |
|
|
|
|
|
71 |
with gr.Row():
|
72 |
with gr.Column(scale=2):
|
73 |
with gr.Tabs() as input_tabs:
|
|
|
238 |
)
|
239 |
|
240 |
gr.Markdown(
|
241 |
+
""" NOTE: If `Asset Attributes` are provided, it will guide
|
242 |
+
GPT to perform physical attributes restoration. \n
|
|
|
243 |
The `Download URDF` file is restored to the real scale and
|
244 |
has quality inspection, open with an editor to view details.
|
245 |
"""
|
|
|
277 |
examples_per_page=10,
|
278 |
)
|
279 |
with gr.Column(scale=1):
|
280 |
+
gr.Markdown("<br>")
|
281 |
video_output = gr.Video(
|
282 |
label="Generated 3D Asset",
|
283 |
autoplay=True,
|
embodied_gen/models/texture_model.py
CHANGED
@@ -60,7 +60,7 @@ def build_texture_gen_pipe(
|
|
60 |
)
|
61 |
|
62 |
if controlnet_ckpt is None:
|
63 |
-
suffix = "geo_cond_mv"
|
64 |
model_path = snapshot_download(
|
65 |
repo_id="xinjjj/RoboAssetGen", allow_patterns=f"{suffix}/*"
|
66 |
)
|
|
|
60 |
)
|
61 |
|
62 |
if controlnet_ckpt is None:
|
63 |
+
suffix = "texture_gen_mv_v1" # "geo_cond_mv"
|
64 |
model_path = snapshot_download(
|
65 |
repo_id="xinjjj/RoboAssetGen", allow_patterns=f"{suffix}/*"
|
66 |
)
|
embodied_gen/scripts/texture_gen.sh
CHANGED
@@ -40,7 +40,7 @@ drender-cli --mesh_path ${mesh_path} \
|
|
40 |
# Step 2: multi-view rendering
|
41 |
python embodied_gen/scripts/render_mv.py \
|
42 |
--index_file "${output_root}/condition/index.json" \
|
43 |
-
--controlnet_cond_scale 0.
|
44 |
--guidance_scale 9 \
|
45 |
--strength 0.9 \
|
46 |
--num_inference_steps 40 \
|
|
|
40 |
# Step 2: multi-view rendering
|
41 |
python embodied_gen/scripts/render_mv.py \
|
42 |
--index_file "${output_root}/condition/index.json" \
|
43 |
+
--controlnet_cond_scale 0.7 \
|
44 |
--guidance_scale 9 \
|
45 |
--strength 0.9 \
|
46 |
--num_inference_steps 40 \
|