hysts (HF Staff) committed
Commit e669631 · 1 Parent(s): 0947d18
.pre-commit-config.yaml ADDED
@@ -0,0 +1,33 @@
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v5.0.0
+     hooks:
+       - id: check-executables-have-shebangs
+       - id: check-json
+       - id: check-merge-conflict
+       - id: check-shebang-scripts-are-executable
+       - id: check-toml
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+       - id: requirements-txt-fixer
+       - id: trailing-whitespace
+   - repo: https://github.com/astral-sh/ruff-pre-commit
+     rev: v0.11.11
+     hooks:
+       - id: ruff-check
+         args: ["--fix"]
+       - id: ruff-format
+   - repo: https://github.com/pre-commit/mirrors-mypy
+     rev: v1.15.0
+     hooks:
+       - id: mypy
+         args: ["--ignore-missing-imports"]
+         additional_dependencies:
+           [
+             "types-python-slugify",
+             "types-pytz",
+             "types-PyYAML",
+             "types-requests",
+           ]
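
Note: the mixed-line-ending hook above runs with --fix=lf, i.e. it rewrites CRLF/CR line endings as LF. A minimal Python sketch of that normalization (hypothetical helper, not part of pre-commit or this commit):

    from pathlib import Path

    def normalize_to_lf(path: str) -> None:
        # Replace CRLF first so lone CR bytes are not double-converted.
        data = Path(path).read_bytes()
        fixed = data.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
        if fixed != data:
            Path(path).write_bytes(fixed)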
.python-version ADDED
@@ -0,0 +1 @@
+ 3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "recommendations": [
+     "ms-python.python",
+     "charliermarsh.ruff",
+     "streetsidesoftware.code-spell-checker",
+     "tamasfe.even-better-toml"
+   ]
+ }
.vscode/settings.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "editor.formatOnSave": true,
+   "files.insertFinalNewline": false,
+   "[python]": {
+     "editor.defaultFormatter": "charliermarsh.ruff",
+     "editor.formatOnType": true,
+     "editor.codeActionsOnSave": {
+       "source.fixAll.ruff": "explicit",
+       "source.organizeImports": "explicit"
+     }
+   },
+   "[jupyter]": {
+     "files.insertFinalNewline": false
+   },
+   "notebook.output.scrolling": true,
+   "notebook.formatOnSave.enabled": true
+ }
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
  colorFrom: indigo
  colorTo: blue
  sdk: gradio
- sdk_version: 4.44.1
+ sdk_version: 5.32.0
  app_file: app.py
  pinned: false
  license: mit
@@ -13,4 +13,4 @@ short_description: Scalable and Versatile 3D Generation from images
 
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
- Paper: https://huggingface.co/papers/2412.01506
+ Paper: https://huggingface.co/papers/2412.01506
app.py CHANGED
@@ -1,31 +1,45 @@
- import gradio as gr
- import spaces
- from gradio_litmodel3d import LitModel3D
- 
  import os
+ import shlex
  import shutil
- os.environ['SPCONV_ALGO'] = 'native'
+ import subprocess
  from typing import *
- import torch
- import numpy as np
+ 
+ os.environ["SPCONV_ALGO"] = "native"
+ 
+ if os.getenv("SPACE_ID"):
+     subprocess.run(
+         shlex.split(
+             "pip install wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl"
+         ),
+         check=True,
+     )
+     subprocess.run(
+         shlex.split("pip install wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl"),
+         check=True,
+     )
+ 
+ import gradio as gr
  import imageio
+ import numpy as np
+ import spaces
+ import torch
  from easydict import EasyDict as edict
  from PIL import Image
+ 
  from trellis.pipelines import TrellisImageTo3DPipeline
  from trellis.representations import Gaussian, MeshExtractResult
- from trellis.utils import render_utils, postprocessing_utils
- 
+ from trellis.utils import postprocessing_utils, render_utils
 
  MAX_SEED = np.iinfo(np.int32).max
- TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
+ TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "tmp")
  os.makedirs(TMP_DIR, exist_ok=True)
 
 
  def start_session(req: gr.Request):
      user_dir = os.path.join(TMP_DIR, str(req.session_hash))
      os.makedirs(user_dir, exist_ok=True)
- 
- 
+ 
+ 
  def end_session(req: gr.Request):
      user_dir = os.path.join(TMP_DIR, str(req.session_hash))
      shutil.rmtree(user_dir)
@@ -48,10 +62,10 @@ def preprocess_image(image: Image.Image) -> Image.Image:
  def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:
      """
      Preprocess a list of input images.
- 
+ 
      Args:
          images (List[Tuple[Image.Image, str]]): The input images.
- 
+ 
      Returns:
          List[Image.Image]: The preprocessed images.
      """
@@ -62,41 +76,41 @@ def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image
 
  def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
      return {
-         'gaussian': {
+         "gaussian": {
              **gs.init_params,
-             '_xyz': gs._xyz.cpu().numpy(),
-             '_features_dc': gs._features_dc.cpu().numpy(),
-             '_scaling': gs._scaling.cpu().numpy(),
-             '_rotation': gs._rotation.cpu().numpy(),
-             '_opacity': gs._opacity.cpu().numpy(),
+             "_xyz": gs._xyz.cpu().numpy(),
+             "_features_dc": gs._features_dc.cpu().numpy(),
+             "_scaling": gs._scaling.cpu().numpy(),
+             "_rotation": gs._rotation.cpu().numpy(),
+             "_opacity": gs._opacity.cpu().numpy(),
          },
-         'mesh': {
-             'vertices': mesh.vertices.cpu().numpy(),
-             'faces': mesh.faces.cpu().numpy(),
+         "mesh": {
+             "vertices": mesh.vertices.cpu().numpy(),
+             "faces": mesh.faces.cpu().numpy(),
          },
      }
- 
- 
+ 
+ 
  def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
      gs = Gaussian(
-         aabb=state['gaussian']['aabb'],
-         sh_degree=state['gaussian']['sh_degree'],
-         mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
-         scaling_bias=state['gaussian']['scaling_bias'],
-         opacity_bias=state['gaussian']['opacity_bias'],
-         scaling_activation=state['gaussian']['scaling_activation'],
+         aabb=state["gaussian"]["aabb"],
+         sh_degree=state["gaussian"]["sh_degree"],
+         mininum_kernel_size=state["gaussian"]["mininum_kernel_size"],
+         scaling_bias=state["gaussian"]["scaling_bias"],
+         opacity_bias=state["gaussian"]["opacity_bias"],
+         scaling_activation=state["gaussian"]["scaling_activation"],
      )
-     gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
-     gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
-     gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
-     gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
-     gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')
- 
+     gs._xyz = torch.tensor(state["gaussian"]["_xyz"], device="cuda")
+     gs._features_dc = torch.tensor(state["gaussian"]["_features_dc"], device="cuda")
+     gs._scaling = torch.tensor(state["gaussian"]["_scaling"], device="cuda")
+     gs._rotation = torch.tensor(state["gaussian"]["_rotation"], device="cuda")
+     gs._opacity = torch.tensor(state["gaussian"]["_opacity"], device="cuda")
+ 
      mesh = edict(
-         vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
-         faces=torch.tensor(state['mesh']['faces'], device='cuda'),
+         vertices=torch.tensor(state["mesh"]["vertices"], device="cuda"),
+         faces=torch.tensor(state["mesh"]["faces"], device="cuda"),
      )
- 
+ 
      return gs, mesh
 
 
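
pack_state and unpack_state above round-trip the generated asset between CUDA tensors and a numpy-only dict, so the result can be held in gr.State between requests without pinning GPU memory. A minimal usage sketch (assumes a pipeline result shaped like the outputs dict used in image_to_3d below):

    # Hedged sketch reusing the two helpers from this file.
    state = pack_state(outputs["gaussian"][0], outputs["mesh"][0])  # plain numpy dict
    gs, mesh = unpack_state(state)                                  # tensors restored on "cuda"
    assert isinstance(state["gaussian"]["_xyz"], np.ndarray)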
@@ -170,12 +184,14 @@ def image_to_3d(
          },
          mode=multiimage_algo,
      )
-     video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
-     video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
-     video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
-     video_path = os.path.join(user_dir, 'sample.mp4')
+     video = render_utils.render_video(outputs["gaussian"][0], num_frames=120)["color"]
+     video_geo = render_utils.render_video(outputs["mesh"][0], num_frames=120)["normal"]
+     video = [
+         np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))
+     ]
+     video_path = os.path.join(user_dir, "sample.mp4")
      imageio.mimsave(video_path, video, fps=15)
-     state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
+     state = pack_state(outputs["gaussian"][0], outputs["mesh"][0])
      torch.cuda.empty_cache()
      return state, video_path
 
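
image_to_3d builds its preview by concatenating each Gaussian color frame with the matching mesh normal frame along axis=1 (image width). A self-contained illustration of that layout with stand-in arrays (not the real renders):

    import numpy as np

    color = np.zeros((2, 512, 512, 3), dtype=np.uint8)   # stand-in color frames
    normal = np.zeros((2, 512, 512, 3), dtype=np.uint8)  # stand-in normal frames
    frames = [np.concatenate([c, n], axis=1) for c, n in zip(color, normal)]
    assert frames[0].shape == (512, 1024, 3)  # side by side: width doubles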
@@ -200,8 +216,10 @@ def extract_glb(
      """
      user_dir = os.path.join(TMP_DIR, str(req.session_hash))
      gs, mesh = unpack_state(state)
-     glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
-     glb_path = os.path.join(user_dir, 'sample.glb')
+     glb = postprocessing_utils.to_glb(
+         gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False
+     )
+     glb_path = os.path.join(user_dir, "sample.glb")
      glb.export(glb_path)
      torch.cuda.empty_cache()
      return glb_path, glb_path
@@ -220,19 +238,21 @@ def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:
      """
      user_dir = os.path.join(TMP_DIR, str(req.session_hash))
      gs, _ = unpack_state(state)
-     gaussian_path = os.path.join(user_dir, 'sample.ply')
+     gaussian_path = os.path.join(user_dir, "sample.ply")
      gs.save_ply(gaussian_path)
      torch.cuda.empty_cache()
      return gaussian_path, gaussian_path
 
 
  def prepare_multi_example() -> List[Image.Image]:
-     multi_case = list(set([i.split('_')[0] for i in os.listdir("assets/example_multi_image")]))
+     multi_case = list(
+         set([i.split("_")[0] for i in os.listdir("assets/example_multi_image")])
+     )
      images = []
      for case in multi_case:
          _images = []
          for i in range(1, 4):
-             img = Image.open(f'assets/example_multi_image/{case}_{i}.png')
+             img = Image.open(f"assets/example_multi_image/{case}_{i}.png")
              W, H = img.size
              img = img.resize((int(W / H * 512), 512))
              _images.append(np.array(img))
@@ -246,12 +266,12 @@ def split_image(image: Image.Image) -> List[Image.Image]:
      """
      image = np.array(image)
      alpha = image[..., 3]
-     alpha = np.any(alpha>0, axis=0)
+     alpha = np.any(alpha > 0, axis=0)
      start_pos = np.where(~alpha[:-1] & alpha[1:])[0].tolist()
      end_pos = np.where(alpha[:-1] & ~alpha[1:])[0].tolist()
      images = []
      for s, e in zip(start_pos, end_pos):
-         images.append(Image.fromarray(image[:, s:e+1]))
+         images.append(Image.fromarray(image[:, s : e + 1]))
      return [preprocess_image(image) for image in images]
 
 
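
split_image finds each object in a wide RGBA sheet by locating runs of columns with non-zero alpha: ~alpha[:-1] & alpha[1:] flags a 0→1 transition (a run starting) and alpha[:-1] & ~alpha[1:] a 1→0 transition (a run ending). A self-contained illustration of the transition test:

    import numpy as np

    alpha = np.array([False, True, True, False, False, True, False])
    starts = np.where(~alpha[:-1] & alpha[1:])[0]  # index just before each run begins
    ends = np.where(alpha[:-1] & ~alpha[1:])[0]    # last index inside each run
    print(starts.tolist(), ends.tolist())  # [0, 4] [2, 5]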
@@ -263,39 +283,67 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
 
      ✨New: 1) Experimental multi-image support. 2) Gaussian file extraction.
      """)
- 
+ 
      with gr.Row():
          with gr.Column():
              with gr.Tabs() as input_tabs:
                  with gr.Tab(label="Single Image", id=0) as single_image_input_tab:
-                     image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=300)
+                     image_prompt = gr.Image(
+                         label="Image Prompt",
+                         format="png",
+                         image_mode="RGBA",
+                         type="pil",
+                         height=300,
+                     )
                  with gr.Tab(label="Multiple Images", id=1) as multiimage_input_tab:
-                     multiimage_prompt = gr.Gallery(label="Image Prompt", format="png", type="pil", height=300, columns=3)
+                     multiimage_prompt = gr.Gallery(
+                         label="Image Prompt",
+                         format="png",
+                         type="pil",
+                         height=300,
+                         columns=3,
+                     )
                      gr.Markdown("""
                      Input different views of the object in separate images.
 
                      *NOTE: this is an experimental algorithm without training a specialized model. It may not produce the best results for all images, especially those having different poses or inconsistent details.*
                      """)
- 
+ 
              with gr.Accordion(label="Generation Settings", open=False):
                  seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
                  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                  gr.Markdown("Stage 1: Sparse Structure Generation")
                  with gr.Row():
-                     ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
-                     ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
+                     ss_guidance_strength = gr.Slider(
+                         0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1
+                     )
+                     ss_sampling_steps = gr.Slider(
+                         1, 50, label="Sampling Steps", value=12, step=1
+                     )
                  gr.Markdown("Stage 2: Structured Latent Generation")
                  with gr.Row():
-                     slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
-                     slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
-                 multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic")
+                     slat_guidance_strength = gr.Slider(
+                         0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1
+                     )
+                     slat_sampling_steps = gr.Slider(
+                         1, 50, label="Sampling Steps", value=12, step=1
+                     )
+                 multiimage_algo = gr.Radio(
+                     ["stochastic", "multidiffusion"],
+                     label="Multi-image Algorithm",
+                     value="stochastic",
+                 )
 
              generate_btn = gr.Button("Generate")
- 
+ 
              with gr.Accordion(label="GLB Extraction Settings", open=False):
-                 mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
-                 texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
- 
+                 mesh_simplify = gr.Slider(
+                     0.9, 0.98, label="Simplify", value=0.95, step=0.01
+                 )
+                 texture_size = gr.Slider(
+                     512, 2048, label="Texture Size", value=1024, step=512
+                 )
+ 
              with gr.Row():
                  extract_glb_btn = gr.Button("Extract GLB", interactive=False)
                  extract_gs_btn = gr.Button("Extract Gaussian", interactive=False)
@@ -304,13 +352,19 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
              """)
 
          with gr.Column():
-             video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300)
-             model_output = LitModel3D(label="Extracted GLB/Gaussian", exposure=10.0, height=300)
- 
+             video_output = gr.Video(
+                 label="Generated 3D Asset", autoplay=True, loop=True, height=300
+             )
+             model_output = gr.Model3D(label="Extracted GLB/Gaussian", height=300)
+ 
              with gr.Row():
-                 download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
-                 download_gs = gr.DownloadButton(label="Download Gaussian", interactive=False)
- 
+                 download_glb = gr.DownloadButton(
+                     label="Download GLB", interactive=False
+                 )
+                 download_gs = gr.DownloadButton(
+                     label="Download Gaussian", interactive=False
+                 )
+ 
      is_multiimage = gr.State(False)
      output_buf = gr.State()
 
@@ -318,7 +372,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
      with gr.Row() as single_image_example:
          examples = gr.Examples(
              examples=[
-                 f'assets/example_image/{image}'
+                 f"assets/example_image/{image}"
                  for image in os.listdir("assets/example_image")
              ],
              inputs=[image_prompt],
@@ -340,16 +394,20 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
      # Handlers
      demo.load(start_session)
      demo.unload(end_session)
- 
+ 
      single_image_input_tab.select(
-         lambda: tuple([False, gr.Row.update(visible=True), gr.Row.update(visible=False)]),
-         outputs=[is_multiimage, single_image_example, multiimage_example]
+         lambda: tuple(
+             [False, gr.Row.update(visible=True), gr.Row.update(visible=False)]
+         ),
+         outputs=[is_multiimage, single_image_example, multiimage_example],
      )
      multiimage_input_tab.select(
-         lambda: tuple([True, gr.Row.update(visible=False), gr.Row.update(visible=True)]),
-         outputs=[is_multiimage, single_image_example, multiimage_example]
+         lambda: tuple(
+             [True, gr.Row.update(visible=False), gr.Row.update(visible=True)]
+         ),
+         outputs=[is_multiimage, single_image_example, multiimage_example],
      )
- 
+ 
      image_prompt.upload(
          preprocess_image,
          inputs=[image_prompt],
@@ -367,7 +425,17 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
          outputs=[seed],
      ).then(
          image_to_3d,
-         inputs=[image_prompt, multiimage_prompt, is_multiimage, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo],
+         inputs=[
+             image_prompt,
+             multiimage_prompt,
+             is_multiimage,
+             seed,
+             ss_guidance_strength,
+             ss_sampling_steps,
+             slat_guidance_strength,
+             slat_sampling_steps,
+             multiimage_algo,
+         ],
          outputs=[output_buf, video_output],
      ).then(
          lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),
@@ -387,7 +455,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
          lambda: gr.Button(interactive=True),
          outputs=[download_glb],
      )
- 
+ 
      extract_gs_btn.click(
          extract_gaussian,
          inputs=[output_buf],
@@ -401,14 +469,16 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
          lambda: gr.Button(interactive=False),
          outputs=[download_glb],
      )
- 
+ 
 
  # Launch the Gradio app
  if __name__ == "__main__":
-     pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
+     pipeline = TrellisImageTo3DPipeline.from_pretrained("microsoft/TRELLIS-image-large")
      pipeline.cuda()
      try:
-         pipeline.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)))  # Preload rembg
+         pipeline.preprocess_image(
+             Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
+         )  # Preload rembg
      except:
          pass
-     demo.launch()
+     demo.launch(mcp_server=True)
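
Note on the conditional wheel install at the top of the new app.py: the Spaces runtime sets the SPACE_ID environment variable, so the pip install of the bundled CUDA wheels runs only inside the Space and is skipped on local checkouts. A small sketch of the same gate (the id below is hypothetical):

    import os

    os.environ.setdefault("SPACE_ID", "user/my-space")  # simulate the Spaces runtime
    if os.getenv("SPACE_ID"):
        print("running on Spaces: install bundled wheels here")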
pyproject.toml ADDED
@@ -0,0 +1,84 @@
+ [project]
+ name = "trellis"
+ version = "0.1.0"
+ description = ""
+ readme = "README.md"
+ requires-python = ">=3.10"
+ dependencies = [
+     "easydict>=1.13",
+     "flash-attn",
+     "gradio[mcp]>=5.32.0",
+     "hf-transfer>=0.1.9",
+     "hf-xet>=1.1.2",
+     "igraph>=0.11.8",
+     "imageio[ffmpeg]>=2.37.0",
+     "onnxruntime>=1.22.0",
+     "opencv-python-headless>=4.11.0.86",
+     "pymeshfix>=0.17.1",
+     "pyvista>=0.45.2",
+     "rembg>=2.0.66",
+     "scipy>=1.15.3",
+     "spaces>=0.36.0",
+     "spconv-cu120>=2.3.6",
+     "torch==2.4.0",
+     "torchvision>=0.19.0",
+     "transformers>=4.52.3",
+     "trimesh>=4.6.10",
+     "utils3d",
+     "xatlas>=0.0.10",
+     "xformers>=0.0.27.post2",
+ ]
+ 
+ [tool.uv.sources]
+ flash-attn = { url = "https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl" }
+ utils3d = { git = "https://github.com/EasternJournalist/utils3d.git", rev = "9a4eb15e4021b67b12c460c7057d642626897ec8" }
+ diff-gaussian-rasterization = { path = "wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl" }
+ nvdiffrast = { path = "wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl" }
+ 
+ [dependency-groups]
+ dev = [
+     "diff-gaussian-rasterization",
+     "nvdiffrast",
+     "setuptools>=80.8.0",
+ ]
+ 
+ #[tool.ruff]
+ #line-length = 119
+ #
+ #[tool.ruff.lint]
+ #select = ["ALL"]
+ #ignore = [
+ #    "COM812", # missing-trailing-comma
+ #    "D203", # one-blank-line-before-class
+ #    "D213", # multi-line-summary-second-line
+ #    "E501", # line-too-long
+ #    "SIM117", # multiple-with-statements
+ #    #
+ #    "D100", # undocumented-public-module
+ #    "D101", # undocumented-public-class
+ #    "D102", # undocumented-public-method
+ #    "D103", # undocumented-public-function
+ #    "D104", # undocumented-public-package
+ #    "D105", # undocumented-magic-method
+ #    "D107", # undocumented-public-init
+ #    "EM101", # raw-string-in-exception
+ #    "FBT001", # boolean-type-hint-positional-argument
+ #    "FBT002", # boolean-default-value-positional-argument
+ #    "PD901", # pandas-df-variable-name
+ #    "PGH003", # blanket-type-ignore
+ #    "PLR0913", # too-many-arguments
+ #    "PLR0915", # too-many-statements
+ #    "TRY003", # raise-vanilla-args
+ #]
+ #unfixable = [
+ #    "F401", # unused-import
+ #]
+ #
+ #[tool.ruff.lint.pydocstyle]
+ #convention = "google"
+ #
+ #[tool.ruff.lint.per-file-ignores]
+ #"*.ipynb" = ["T201", "T203"]
+ #
+ #[tool.ruff.format]
+ #docstring-code-format = true
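
Both the bundled wheels and the pinned flash-attn wheel above are cp310 builds, matching the 3.10 pin in .python-version. A hypothetical startup guard (not in this commit) that makes the interpreter requirement explicit:

    import sys

    # cp310 wheels only load on CPython 3.10.
    if sys.version_info[:2] != (3, 10):
        raise RuntimeError(f"Python 3.10 required, found {sys.version.split()[0]}")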
requirements.txt CHANGED
@@ -1,26 +1,456 @@
- --extra-index-url https://download.pytorch.org/whl/cu121
- 
+ # This file was autogenerated by uv via the following command:
+ #    uv pip compile pyproject.toml -o requirements.txt
+ aiofiles==24.1.0
+     # via gradio
+ annotated-types==0.7.0
+     # via pydantic
+ anyio==4.9.0
+     # via
+     #   gradio
+     #   httpx
+     #   mcp
+     #   sse-starlette
+     #   starlette
+ attrs==25.3.0
+     # via
+     #   jsonschema
+     #   referencing
+ ccimport==0.4.4
+     # via
+     #   pccm
+     #   spconv-cu120
+ certifi==2025.4.26
+     # via
+     #   httpcore
+     #   httpx
+     #   requests
+ charset-normalizer==3.4.2
+     # via requests
+ click==8.2.1
+     # via
+     #   typer
+     #   uvicorn
+ coloredlogs==15.0.1
+     # via onnxruntime
+ contourpy==1.3.2
+     # via matplotlib
+ cumm-cu120==0.4.11
+     # via spconv-cu120
+ cycler==0.12.1
+     # via matplotlib
+ easydict==1.13
+     # via trellis (pyproject.toml)
+ einops==0.8.1
+     # via flash-attn
+ exceptiongroup==1.3.0
+     # via anyio
+ fastapi==0.115.12
+     # via gradio
+ ffmpy==0.5.0
+     # via gradio
+ filelock==3.18.0
+     # via
+     #   huggingface-hub
+     #   torch
+     #   transformers
+     #   triton
+ fire==0.7.0
+     # via
+     #   cumm-cu120
+     #   pccm
+     #   spconv-cu120
+ flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+     # via trellis (pyproject.toml)
+ flatbuffers==25.2.10
+     # via onnxruntime
+ fonttools==4.58.1
+     # via matplotlib
+ fsspec==2025.5.1
+     # via
+     #   gradio-client
+     #   huggingface-hub
+     #   torch
+ glcontext==3.0.0
+     # via moderngl
+ gradio==5.32.0
+     # via
+     #   trellis (pyproject.toml)
+     #   spaces
+ gradio-client==1.10.2
+     # via gradio
+ groovy==0.1.2
+     # via gradio
+ h11==0.16.0
+     # via
+     #   httpcore
+     #   uvicorn
+ hf-transfer==0.1.9
+     # via trellis (pyproject.toml)
+ hf-xet==1.1.2
+     # via
+     #   trellis (pyproject.toml)
+     #   huggingface-hub
+ httpcore==1.0.9
+     # via httpx
+ httpx==0.28.1
+     # via
+     #   gradio
+     #   gradio-client
+     #   mcp
+     #   safehttpx
+     #   spaces
+ httpx-sse==0.4.0
+     # via mcp
+ huggingface-hub==0.32.3
+     # via
+     #   gradio
+     #   gradio-client
+     #   tokenizers
+     #   transformers
+ humanfriendly==10.0
+     # via coloredlogs
+ idna==3.10
+     # via
+     #   anyio
+     #   httpx
+     #   requests
+ igraph==0.11.8
+     # via trellis (pyproject.toml)
+ imageio==2.37.0
+     # via
+     #   trellis (pyproject.toml)
+     #   scikit-image
+ imageio-ffmpeg==0.5.1
+     # via imageio
+ jinja2==3.1.6
+     # via
+     #   gradio
+     #   torch
+ jsonschema==4.24.0
+     # via rembg
+ jsonschema-specifications==2025.4.1
+     # via jsonschema
+ kiwisolver==1.4.8
+     # via matplotlib
+ lark==1.2.2
+     # via pccm
+ lazy-loader==0.4
+     # via scikit-image
+ llvmlite==0.44.0
+     # via numba
+ markdown-it-py==3.0.0
+     # via rich
+ markupsafe==3.0.2
+     # via
+     #   gradio
+     #   jinja2
+ matplotlib==3.10.3
+     # via
+     #   pyvista
+     #   vtk
+ mcp==1.9.0
+     # via gradio
+ mdurl==0.1.2
+     # via markdown-it-py
+ moderngl==5.12.0
+     # via utils3d
+ mpmath==1.3.0
+     # via sympy
+ networkx==3.4.2
+     # via
+     #   scikit-image
+     #   torch
+ ninja==1.11.1.4
+     # via ccimport
+ numba==0.61.2
+     # via pymatting
+ numpy==2.2.6
+     # via
+     #   contourpy
+     #   cumm-cu120
+     #   gradio
+     #   imageio
+     #   matplotlib
+     #   numba
+     #   onnxruntime
+     #   opencv-python-headless
+     #   pandas
+     #   plyfile
+     #   pymatting
+     #   pymeshfix
+     #   pyvista
+     #   rembg
+     #   scikit-image
+     #   scipy
+     #   spconv-cu120
+     #   tifffile
+     #   torchvision
+     #   transformers
+     #   trimesh
+     #   utils3d
+     #   xformers
+ nvidia-cublas-cu12==12.1.3.1
+     # via
+     #   nvidia-cudnn-cu12
+     #   nvidia-cusolver-cu12
+     #   torch
+ nvidia-cuda-cupti-cu12==12.1.105
+     # via torch
+ nvidia-cuda-nvrtc-cu12==12.1.105
+     # via torch
+ nvidia-cuda-runtime-cu12==12.1.105
+     # via torch
+ nvidia-cudnn-cu12==9.1.0.70
+     # via torch
+ nvidia-cufft-cu12==11.0.2.54
+     # via torch
+ nvidia-curand-cu12==10.3.2.106
+     # via torch
+ nvidia-cusolver-cu12==11.4.5.107
+     # via torch
+ nvidia-cusparse-cu12==12.1.0.106
+     # via
+     #   nvidia-cusolver-cu12
+     #   torch
+ nvidia-nccl-cu12==2.20.5
+     # via torch
+ nvidia-nvjitlink-cu12==12.9.41
+     # via
+     #   nvidia-cusolver-cu12
+     #   nvidia-cusparse-cu12
+ nvidia-nvtx-cu12==12.1.105
+     # via torch
+ onnxruntime==1.22.0
+     # via trellis (pyproject.toml)
+ opencv-python-headless==4.11.0.86
+     # via
+     #   trellis (pyproject.toml)
+     #   rembg
+ orjson==3.10.18
+     # via gradio
+ packaging==25.0
+     # via
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   lazy-loader
+     #   matplotlib
+     #   onnxruntime
+     #   pooch
+     #   scikit-image
+     #   spaces
+     #   transformers
+ pandas==2.2.3
+     # via gradio
+ pccm==0.4.16
+     # via
+     #   cumm-cu120
+     #   spconv-cu120
+ pillow==10.4.0
+     # via
+     #   gradio
+     #   imageio
+     #   matplotlib
+     #   pymatting
+     #   pyvista
+     #   rembg
+     #   scikit-image
+     #   torchvision
+ platformdirs==4.3.8
+     # via pooch
+ plyfile==1.1
+     # via utils3d
+ pooch==1.8.2
+     # via
+     #   pyvista
+     #   rembg
+ portalocker==3.1.1
+     # via pccm
+ protobuf==6.31.1
+     # via onnxruntime
+ psutil==5.9.8
+     # via
+     #   imageio
+     #   spaces
+ pybind11==2.13.6
+     # via
+     #   ccimport
+     #   cumm-cu120
+     #   pccm
+     #   spconv-cu120
+ pydantic==2.11.5
+     # via
+     #   fastapi
+     #   gradio
+     #   mcp
+     #   pydantic-settings
+     #   spaces
+ pydantic-core==2.33.2
+     # via pydantic
+ pydantic-settings==2.9.1
+     # via mcp
+ pydub==0.25.1
+     # via gradio
+ pygments==2.19.1
+     # via rich
+ pymatting==1.1.14
+     # via rembg
+ pymeshfix==0.17.1
+     # via trellis (pyproject.toml)
+ pyparsing==3.2.3
+     # via matplotlib
+ python-dateutil==2.9.0.post0
+     # via
+     #   matplotlib
+     #   pandas
+ python-dotenv==1.1.0
+     # via pydantic-settings
+ python-multipart==0.0.20
+     # via
+     #   gradio
+     #   mcp
+ pytz==2025.2
+     # via pandas
+ pyvista==0.45.2
+     # via
+     #   trellis (pyproject.toml)
+     #   pymeshfix
+ pyyaml==6.0.2
+     # via
+     #   gradio
+     #   huggingface-hub
+     #   transformers
+ referencing==0.36.2
+     # via
+     #   jsonschema
+     #   jsonschema-specifications
+ regex==2024.11.6
+     # via transformers
+ rembg==2.0.66
+     # via trellis (pyproject.toml)
+ requests==2.32.3
+     # via
+     #   ccimport
+     #   huggingface-hub
+     #   pooch
+     #   spaces
+     #   transformers
+ rich==14.0.0
+     # via typer
+ rpds-py==0.25.1
+     # via
+     #   jsonschema
+     #   referencing
+ ruff==0.11.12
+     # via gradio
+ safehttpx==0.1.6
+     # via gradio
+ safetensors==0.5.3
+     # via transformers
+ scikit-image==0.25.2
+     # via rembg
+ scipy==1.15.3
+     # via
+     #   trellis (pyproject.toml)
+     #   pymatting
+     #   rembg
+     #   scikit-image
+     #   utils3d
+ scooby==0.10.1
+     # via pyvista
+ semantic-version==2.10.0
+     # via gradio
+ setuptools==80.9.0
+     # via imageio-ffmpeg
+ shellingham==1.5.4
+     # via typer
+ six==1.17.0
+     # via python-dateutil
+ sniffio==1.3.1
+     # via anyio
+ spaces==0.36.0
+     # via trellis (pyproject.toml)
+ spconv-cu120==2.3.6
+     # via trellis (pyproject.toml)
+ sse-starlette==2.3.6
+     # via mcp
+ starlette==0.46.2
+     # via
+     #   fastapi
+     #   gradio
+     #   mcp
+ sympy==1.14.0
+     # via
+     #   onnxruntime
+     #   torch
+ termcolor==3.1.0
+     # via fire
+ texttable==1.7.0
+     # via igraph
+ tifffile==2025.5.10
+     # via scikit-image
+ tokenizers==0.21.1
+     # via transformers
+ tomlkit==0.13.2
+     # via gradio
  torch==2.4.0
+     # via
+     #   trellis (pyproject.toml)
+     #   flash-attn
+     #   torchvision
+     #   xformers
  torchvision==0.19.0
- pillow==10.4.0
- imageio==2.36.1
- imageio-ffmpeg==0.5.1
+     # via trellis (pyproject.toml)
  tqdm==4.67.1
- easydict==1.13
- opencv-python-headless==4.10.0.84
- scipy==1.14.1
- rembg==2.0.60
- onnxruntime==1.20.1
- trimesh==4.5.3
- xatlas==0.0.9
- pyvista==0.44.2
- pymeshfix==0.17.0
- igraph==0.11.8
- git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
+     # via
+     #   huggingface-hub
+     #   rembg
+     #   transformers
+ transformers==4.52.4
+     # via trellis (pyproject.toml)
+ trimesh==4.6.10
+     # via trellis (pyproject.toml)
+ triton==3.0.0
+     # via torch
+ typer==0.16.0
+     # via gradio
+ typing-extensions==4.13.2
+     # via
+     #   anyio
+     #   exceptiongroup
+     #   fastapi
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   pydantic
+     #   pydantic-core
+     #   pyvista
+     #   referencing
+     #   rich
+     #   spaces
+     #   torch
+     #   typer
+     #   typing-inspection
+     #   uvicorn
+ typing-inspection==0.4.1
+     # via
+     #   pydantic
+     #   pydantic-settings
+ tzdata==2025.2
+     # via pandas
+ urllib3==2.4.0
+     # via requests
+ utils3d @ git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
+     # via trellis (pyproject.toml)
+ uvicorn==0.34.2
+     # via
+     #   gradio
+     #   mcp
+ vtk==9.4.2
+     # via pyvista
+ websockets==15.0.1
+     # via gradio-client
+ xatlas==0.0.10
+     # via trellis (pyproject.toml)
  xformers==0.0.27.post2
- spconv-cu120==2.3.6
- transformers==4.46.3
- gradio_litmodel3d==0.0.1
- https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
- https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl?download=true
- https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl?download=true
+     # via trellis (pyproject.toml)
uv.lock ADDED
The diff for this file is too large to render. See raw diff