# Project EmbodiedGen
#
# Copyright (c) 2025 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.


import json
import os
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Literal

import imageio
import numpy as np
import torch
import tyro
from tqdm import tqdm

from embodied_gen.models.gs_model import GaussianOperator
from embodied_gen.utils.enum import LayoutInfo, Scene3DItemEnum
from embodied_gen.utils.geometry import quaternion_multiply
from embodied_gen.utils.log import logger
from embodied_gen.utils.process_media import alpha_blend_rgba
from embodied_gen.utils.simulation import (
    SIM_COORD_ALIGN,
    FrankaPandaGrasper,
    SapienSceneManager,
    load_assets_from_layout_file,
    load_mani_skill_robot,
    render_images,
)


@dataclass
class SapienSimConfig:
    # Simulation settings.
    layout_path: str
    output_dir: str
    sim_freq: int = 200
    sim_step: int = 400
    z_offset: float = 0.004
    init_quat: list[float] = field(
        default_factory=lambda: [0.7071, 0, 0, 0.7071]
    )  # xyzw
    device: str = "cuda"
    control_freq: int = 50
    insert_robot: bool = False
    # Camera settings.
    render_interval: int = 10
    num_cameras: int = 3
    camera_radius: float = 0.9
    camera_height: float = 1.1
    image_hw: tuple[int, int] = (512, 512)
    ray_tracing: bool = True
    fovy_deg: float = 75.0
    camera_target_pt: list[float] = field(
        default_factory=lambda: [0.0, 0.0, 0.9]
    )
    render_keys: list[
        Literal[
            "Color", "Foreground", "Segmentation", "Normal", "Mask", "Depth"
        ]
    ] = field(default_factory=lambda: ["Foreground"])


def entrypoint(**kwargs):
    if not kwargs:
        cfg = tyro.cli(SapienSimConfig)
    else:
        cfg = SapienSimConfig(**kwargs)

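    # Build the SAPIEN scene and a ring of cameras orbiting the target point.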
    scene_manager = SapienSceneManager(
        cfg.sim_freq, ray_tracing=cfg.ray_tracing
    )
    _ = scene_manager.initialize_circular_cameras(
        num_cameras=cfg.num_cameras,
        radius=cfg.camera_radius,
        height=cfg.camera_height,
        target_pt=cfg.camera_target_pt,
        image_hw=cfg.image_hw,
        fovy_deg=cfg.fovy_deg,
    )
    with open(cfg.layout_path, "r") as f:
        layout_data: LayoutInfo = LayoutInfo.from_dict(json.load(f))

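    # Instantiate the layout assets and the ManiSkill robot in the scene.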
    actors = load_assets_from_layout_file(
        scene_manager.scene,
        layout_data,
        cfg.z_offset,
        cfg.init_quat,
    )
    agent = load_mani_skill_robot(
        scene_manager.scene, layout_data, cfg.control_freq
    )

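    # Step physics so the assets settle, holding the robot at its initial pose
    # and rendering every `render_interval` steps from each camera.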
    frames = defaultdict(list)
    image_cnt = 0
    for step in tqdm(range(cfg.sim_step), desc="Simulation"):
        scene_manager.scene.step()
        agent.reset(agent.init_qpos)
        if step % cfg.render_interval != 0:
            continue
        scene_manager.scene.update_render()
        image_cnt += 1
        for camera in scene_manager.cameras:
            camera.take_picture()
            images = render_images(camera, cfg.render_keys)
            frames[camera.name].append(images)

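    # Optionally compute reach-to-grasp actions for each manipulated object.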
    actions = dict()
    if cfg.insert_robot:
        grasper = FrankaPandaGrasper(
            agent,
            cfg.control_freq,
        )
        for node in layout_data.relation[
            Scene3DItemEnum.MANIPULATED_OBJS.value
        ]:
            actions[node] = grasper.compute_grasp_action(
                actor=actors[node], reach_target_only=True
            )

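    # The compositing below requires the "Foreground" render pass.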
    if "Foreground" not in cfg.render_keys:
        return

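    # Load the background Gaussian-splat model and transform it into the
    # scene frame used by the simulator.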
    bg_node = layout_data.relation[Scene3DItemEnum.BACKGROUND.value]
    gs_path = f"{layout_data.assets[bg_node]}/gs_model.ply"
    gs_model: GaussianOperator = GaussianOperator.load_from_ply(gs_path)
    x, y, z, qx, qy, qz, qw = layout_data.position[bg_node]
    qx, qy, qz, qw = quaternion_multiply([qx, qy, qz, qw], cfg.init_quat)
    init_pose = torch.tensor([x, y, z, qx, qy, qz, qw])
    gs_model = gs_model.get_gaussians(instance_pose=init_pose)

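    # Render the static background once per camera; the channel order of the
    # result is reversed to match the compositing convention.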
    bg_images = dict()
    for camera in scene_manager.cameras:
        Ks = camera.get_intrinsic_matrix()
        c2w = camera.get_model_matrix()
        c2w = c2w @ SIM_COORD_ALIGN
        result = gs_model.render(
            torch.tensor(c2w, dtype=torch.float32).to(cfg.device),
            torch.tensor(Ks, dtype=torch.float32).to(cfg.device),
            image_width=cfg.image_hw[1],
            image_height=cfg.image_hw[0],
        )
        bg_images[camera.name] = result.rgb[..., ::-1]

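    # Assemble the video: settled-scene frames from the first camera, followed
    # by grasp-action replays composited from each camera in turn.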
    video_frames = []
    for idx, camera in enumerate(scene_manager.cameras):
        # Scene rendering
        if idx == 0:
            for step in range(image_cnt):
                rgba = alpha_blend_rgba(
                    frames[camera.name][step]["Foreground"],
                    bg_images[camera.name],
                )
                video_frames.append(np.array(rgba))

        # Grasp rendering
        for node in actions:
            if actions[node] is None:
                continue
            for action in tqdm(actions[node]):
                grasp_frames = scene_manager.step_action(
                    agent,
                    torch.Tensor(action[None, ...]),
                    scene_manager.cameras,
                    cfg.render_keys,
                    sim_steps_per_control=cfg.sim_freq // cfg.control_freq,
                )
                rgba = alpha_blend_rgba(
                    grasp_frames[camera.name][0]["Foreground"],
                    bg_images[camera.name],
                )
                video_frames.append(np.array(rgba))

            agent.reset(agent.init_qpos)

    os.makedirs(cfg.output_dir, exist_ok=True)
    video_path = f"{cfg.output_dir}/Iscene.mp4"
    imageio.mimsave(video_path, video_frames, fps=30)
    logger.info(f"Interactive 3D scene visualization saved to {video_path}")


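# Programmatic usage (hypothetical paths): entrypoint() forwards keyword
# arguments straight into SapienSimConfig, e.g.:
#   entrypoint(layout_path="outputs/layout.json", output_dir="outputs/sim")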
if __name__ == "__main__":
    entrypoint()