Simulating a Camera Sensor#
This tutorial demonstrates how to create and simulate a camera sensor attached to a robot using SimulationManager. You will learn how to configure a camera, attach it to the robot’s end-effector, and visualize the sensor’s output during simulation.
Source Code#
The code for this tutorial is in scripts/tutorials/sim/create_sensor.py.
Show code for create_sensor.py
1# ----------------------------------------------------------------------------
2# Copyright (c) 2021-2026 DexForce Technology Co., Ltd.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15# ----------------------------------------------------------------------------
16
17"""
18This script demonstrates how to create and simulate a camera sensor attached to a robot using SimulationManager.
19It shows how to configure a camera sensor, attach it to the robot's end-effector, and visualize the sensor's output during simulation.
20"""
21
22import argparse
23import numpy as np
24import torch
25import cv2
26
27torch.set_printoptions(precision=4, sci_mode=False)
28
29from scipy.spatial.transform import Rotation as R
30
31from embodichain.lab.sim import SimulationManager, SimulationManagerCfg
32from embodichain.lab.sim.sensors import Camera, CameraCfg
33from embodichain.lab.sim.objects import Robot
34from embodichain.lab.sim.cfg import (
35 JointDrivePropertiesCfg,
36 RobotCfg,
37 URDFCfg,
38 RigidObjectCfg,
39)
40from embodichain.lab.sim.shapes import CubeCfg
41from embodichain.data import get_data_path
42
43
def mask_to_color_map(mask, user_ids, fix_seed=True):
    """
    Convert an instance mask into an RGB color map.

    :param mask: Instance mask map of shape (H, W); each pixel holds an instance ID.
    :param user_ids: List of unique user IDs present in the mask.
    :param fix_seed: If True, seed NumPy's RNG with each user ID so that the
        same ID always maps to the same color across calls. If False, colors
        are drawn from the current global RNG state.
    :return: Color map of shape (H, W, 3), dtype uint8.
    """
    # Create a blank RGB image
    color_map = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)

    for user_id in user_ids:
        if fix_seed:
            # Seed with the ID so the color assigned to an instance is
            # deterministic across frames/calls.
            # NOTE(review): np.random.seed requires a non-negative integer;
            # confirm mask IDs can never be negative.
            np.random.seed(user_id)
        color = np.random.choice(range(256), size=3)
        # Paint every pixel belonging to this instance with its color.
        color_map[mask == user_id] = color

    return color_map
67
68
def main():
    """Main function to demonstrate robot sensor simulation.

    Parses CLI options, builds the simulation, spawns the robot, the camera
    sensor, and a cube for the camera to observe, then enters the loop.
    """

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Create and simulate a robot in SimulationManager"
    )
    parser.add_argument(
        "--num_envs", type=int, default=1, help="Number of environments to simulate"
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        choices=["cpu", "cuda"],
        help="Device to run simulation on",
    )
    parser.add_argument("--headless", action="store_true", help="Run in headless mode")
    parser.add_argument(
        "--enable_rt", action="store_true", help="Enable ray tracing rendering"
    )
    parser.add_argument(
        "--attach_sensor",
        action="store_true",
        help="Attach sensor to robot end-effector",
    )
    args = parser.parse_args()

    # Initialize simulation
    print("Creating simulation...")
    # NOTE(review): headless=True is hard-coded even though a --headless flag
    # exists; the viewer window is opened separately below. Confirm this is
    # intentional rather than a leftover.
    config = SimulationManagerCfg(
        headless=True,
        sim_device=args.device,
        arena_space=3.0,
        enable_rt=args.enable_rt,
        physics_dt=1.0 / 100.0,  # 100 Hz physics step
        num_envs=args.num_envs,
    )
    sim = SimulationManager(config)

    # Create robot configuration
    robot = create_robot(sim)

    # Create the camera sensor (attached or free-standing per --attach_sensor)
    sensor = create_sensor(sim, args)

    # Add a cube to the scene
    cube_cfg = RigidObjectCfg(
        uid="cube",
        shape=CubeCfg(size=[0.05, 0.05, 0.05]),  # Use CubeCfg for a cube
        init_pos=[1.2, -0.2, 0.1],
        init_rot=[0, 0, 0],
    )
    sim.add_rigid_object(cfg=cube_cfg)

    # Initialize GPU physics if using CUDA
    if sim.is_use_gpu_physics:
        sim.init_gpu_physics()

    # Open visualization window if not headless
    if not args.headless:
        sim.open_window()

    # Run simulation loop
    # NOTE(review): args.headless is not forwarded here, so the loop's image
    # visualization always uses the GUI path — verify behavior for --headless runs.
    run_simulation(sim, robot, sensor)
133
134
def create_sensor(sim: SimulationManager, args):
    """Create a camera sensor and add it to the simulation.

    If ``args.attach_sensor`` is True the camera is mounted on the robot's
    ``ee_link``; otherwise it is placed at a fixed pose in the scene.

    :param sim: The active simulation manager.
    :param args: Parsed CLI arguments; only ``attach_sensor`` is read.
    :return: The created Camera instance.
    """
    # Pinhole intrinsics as (fx, fy, cx, cy) in pixels.
    intrinsics = (600, 600, 320.0, 240.0)
    width = 640
    height = 480

    # Extrinsics used when the camera is mounted on the end-effector.
    pos = [0.09, 0.05, 0.04]
    quat = R.from_euler("xyz", [-35, 135, 0], degrees=True).as_quat().tolist()

    # If attach_sensor is True, attach to robot end-effector; otherwise, place it in the scene
    if args.attach_sensor:
        parent = "ee_link"
    else:
        parent = None
        pos = [1.2, -0.2, 1.5]
        quat = R.from_euler("xyz", [0, 180, 0], degrees=True).as_quat().tolist()

    # SciPy's as_quat() returns scalar-last (x, y, z, w) order, while CameraCfg
    # expects scalar-first (w, x, y, z).
    # BUG FIX: previously only the free-standing branch performed this
    # conversion, so the attached camera received an (x, y, z, w) quaternion.
    quat = [quat[3], quat[0], quat[1], quat[2]]  # Convert to (w, x, y, z)

    # create camera sensor and attach to robot end-effector
    camera: Camera = sim.add_sensor(
        sensor_cfg=CameraCfg(
            width=width,
            height=height,
            intrinsics=intrinsics,
            extrinsics=CameraCfg.ExtrinsicsCfg(
                parent=parent,
                pos=pos,
                quat=quat,
            ),
            near=0.01,  # near clipping plane (meters)
            far=10.0,  # far clipping plane (meters)
            enable_color=True,
            enable_depth=True,
            enable_mask=True,
            enable_normal=True,
        )
    )
    return camera
174
175
def create_robot(sim):
    """Create and configure a robot in the simulation.

    Builds an SR5 arm with a BrainCo left hand attached at the flange,
    configures joint drive gains, and adds the robot to the simulation.

    :param sim: The active SimulationManager.
    :return: The created Robot instance.
    """

    print("Loading robot...")

    # Get SR5 URDF path
    sr5_urdf_path = get_data_path("Rokae/SR5/SR5.urdf")

    # Get hand URDF path
    hand_urdf_path = get_data_path(
        "BrainCoHandRevo1/BrainCoLeftHand/BrainCoLeftHand.urdf"
    )

    # Define control parts for the robot
    # Joint names in control_parts can be regex patterns
    CONTROL_PARTS = {
        "arm": [
            "JOINT[1-6]",  # Matches JOINT1, JOINT2, ..., JOINT6
        ],
        "hand": ["LEFT_.*"],  # Matches all joints starting with LEFT_
    }

    # Define transformation for hand attachment:
    # rotate 90 degrees about X and offset 0.02 m along Z.
    hand_attach_xpos = np.eye(4)
    hand_attach_xpos[:3, :3] = R.from_rotvec([90, 0, 0], degrees=True).as_matrix()
    hand_attach_xpos[2, 3] = 0.02

    cfg = RobotCfg(
        uid="sr5_with_brainco",
        urdf_cfg=URDFCfg(
            components=[
                {
                    "component_type": "arm",
                    "urdf_path": sr5_urdf_path,
                },
                {
                    "component_type": "hand",
                    "urdf_path": hand_urdf_path,
                    # Pose of the hand relative to the arm flange.
                    "transform": hand_attach_xpos,
                },
            ]
        ),
        control_parts=CONTROL_PARTS,
        # PD drive gains matched per joint-name regex (arm stiffer than hand).
        drive_pros=JointDrivePropertiesCfg(
            stiffness={"JOINT[1-6]": 1e4, "LEFT_.*": 1e3},
            damping={"JOINT[1-6]": 1e3, "LEFT_.*": 1e2},
        ),
    )

    # Add robot to simulation
    robot: Robot = sim.add_robot(cfg=cfg)

    print(f"Robot created successfully with {robot.dof} joints")

    return robot
231
232
def get_sensor_image(camera: Camera, headless=False, step_count=0):
    """
    Get color, depth, mask, and normals views from the camera,
    and visualize them in a 2x2 grid (or save if headless).

    :param camera: Camera sensor to read from.
    :param headless: If True, save the grid as a PNG instead of opening
        an OpenCV window.
    :param step_count: Used only in headless mode, to name the saved file.
    """
    # Imported lazily so GUI-only runs don't need matplotlib at module import.
    import matplotlib.pyplot as plt

    # Refresh the sensor so the buffers hold the latest rendered frame.
    camera.update()
    data = camera.get_data()
    # Get four views
    # NOTE(review): indexing [0] selects environment 0 only; other
    # environments' frames are not visualized.
    rgba = data["color"].cpu().numpy()[0, :, :, :3]  # (H, W, 3)
    depth = data["depth"].squeeze().cpu().numpy()  # (H, W)
    mask = data["mask"].squeeze().cpu().numpy()  # (H, W)
    normals = data["normal"].cpu().numpy()[0]  # (H, W, 3)

    # Normalize for visualization
    # Min-max scale depth to [0, 255]; epsilon guards a constant depth image.
    depth_vis = (depth - depth.min()) / (np.ptp(depth) + 1e-8)
    depth_vis = (depth_vis * 255).astype("uint8")
    mask_vis = mask_to_color_map(mask, user_ids=np.unique(mask))
    # Normals are presumably in [-1, 1], remapped to [0, 255] — TODO confirm.
    normals_vis = ((normals + 1) / 2 * 255).astype("uint8")

    # Prepare titles and images for display
    titles = ["Color", "Depth", "Mask", "Normals"]
    images = [
        # NOTE(review): assumes the color buffer is uint8 RGB; verify the
        # Camera output dtype matches what cv2.cvtColor expects.
        cv2.cvtColor(rgba, cv2.COLOR_RGB2BGR),
        cv2.cvtColor(depth_vis, cv2.COLOR_GRAY2BGR),
        mask_vis,
        cv2.cvtColor(normals_vis, cv2.COLOR_RGB2BGR),
    ]

    if not headless:
        # Concatenate images for 2x2 grid display using OpenCV
        top = np.hstack([images[0], images[1]])
        bottom = np.hstack([images[2], images[3]])
        grid = np.vstack([top, bottom])
        cv2.imshow("Sensor Views (Color / Depth / Mask / Normals)", grid)
        cv2.waitKey(1)  # pump the OpenCV event loop without blocking
    else:
        # Save the 2x2 grid as an image using matplotlib
        fig, axs = plt.subplots(2, 2, figsize=(10, 8))
        for ax, img, title in zip(axs.flatten(), images, titles):
            # matplotlib expects RGB, so convert back from BGR.
            ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            ax.set_title(title)
            ax.axis("off")
        plt.tight_layout()
        plt.savefig(f"sensor_views_{step_count}.png")
        plt.close(fig)
280
281
def run_simulation(
    sim: SimulationManager, robot: Robot, camera: Camera, headless: bool = False
):
    """Run the simulation loop with robot and camera sensor control.

    The robot alternates between two arm poses; after each commanded move the
    camera is refreshed and its views are visualized.

    :param sim: The active simulation manager.
    :param robot: Robot whose "arm" control part is driven.
    :param camera: Camera sensor to refresh and visualize.
    :param headless: Forwarded to get_sensor_image; when True, images are
        saved to disk instead of shown in an OpenCV window. Defaults to
        False, preserving the previous behavior for existing callers.
    """

    print("Starting simulation...")
    print("Robot will move through different poses")
    print("Press Ctrl+C to stop")

    step_count = 0

    arm_joint_ids = robot.get_joint_ids("arm")

    # Two demonstration joint targets, replicated across all environments.
    arm_position1 = (
        torch.tensor(
            [0.0, 0.5, -1.5, 0.3, -0.5, 0], dtype=torch.float32, device=sim.device
        )
        .unsqueeze_(0)
        .repeat(sim.num_envs, 1)
    )

    arm_position2 = (
        torch.tensor(
            [0.0, 0.5, -1.5, -0.3, -0.5, 0], dtype=torch.float32, device=sim.device
        )
        .unsqueeze_(0)
        .repeat(sim.num_envs, 1)
    )

    try:
        while True:
            # Update physics
            sim.update(step=1)

            # NOTE: at step_count == 0 both moduli are 0, so both poses are
            # commanded in the same step (position 2 wins) and the sensor is
            # read twice; afterwards the 1001/2003 periods interleave.
            if step_count % 1001 == 0:
                robot.set_qpos(qpos=arm_position1, joint_ids=arm_joint_ids)
                print("Moving to arm position 1")

                # Refresh and get image from sensor
                get_sensor_image(camera, headless=headless, step_count=step_count)

            if step_count % 2003 == 0:
                robot.set_qpos(qpos=arm_position2, joint_ids=arm_joint_ids)
                print("Moving to arm position 2")

                # Refresh and get image from sensor
                get_sensor_image(camera, headless=headless, step_count=step_count)

            step_count += 1

    except KeyboardInterrupt:
        print("Stopping simulation...")
    finally:
        print("Cleaning up...")
        sim.destroy()
337
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
Overview#
This tutorial builds on the basic robot simulation example. If you are not familiar with robot simulation in SimulationManager, please read the Simulating a Robot tutorial first.
1. Sensor Creation and Attachment#
The camera sensor is created using CameraCfg and can be attached to the robot’s end-effector or placed freely in the scene. The attachment is controlled by the --attach_sensor argument.
def create_sensor(sim: SimulationManager, args):
# intrinsics params
intrinsics = (600, 600, 320.0, 240.0)
width = 640
height = 480
# extrinsics params
pos = [0.09, 0.05, 0.04]
quat = R.from_euler("xyz", [-35, 135, 0], degrees=True).as_quat().tolist()
# If attach_sensor is True, attach to robot end-effector; otherwise, place it in the scene
if args.attach_sensor:
parent = "ee_link"
else:
parent = None
pos = [1.2, -0.2, 1.5]
quat = R.from_euler("xyz", [0, 180, 0], degrees=True).as_quat().tolist()
quat = [quat[3], quat[0], quat[1], quat[2]] # Convert to (w, x, y, z)
# create camera sensor and attach to robot end-effector
camera: Camera = sim.add_sensor(
sensor_cfg=CameraCfg(
width=width,
height=height,
intrinsics=intrinsics,
extrinsics=CameraCfg.ExtrinsicsCfg(
parent=parent,
pos=pos,
quat=quat,
),
near=0.01,
far=10.0,
enable_color=True,
enable_depth=True,
enable_mask=True,
enable_normal=True,
)
)
return camera
The camera’s intrinsics (focal lengths and principal point) and resolution are set.
The `extrinsics` specify the camera's pose relative to its parent (e.g., the robot's `ee_link` or the world).
The camera is added to the simulation with `sim.add_sensor()`.
2. Visualizing Sensor Output#
The function get_sensor_image retrieves and visualizes the camera’s color, depth, mask, and normal images. In GUI mode, images are shown in a 2x2 grid using OpenCV. In headless mode, images are saved to disk.
def get_sensor_image(camera: Camera, headless=False, step_count=0):
"""
Get color, depth, mask, and normals views from the camera,
and visualize them in a 2x2 grid (or save if headless).
"""
import matplotlib.pyplot as plt
camera.update()
data = camera.get_data()
# Get four views
rgba = data["color"].cpu().numpy()[0, :, :, :3] # (H, W, 3)
depth = data["depth"].squeeze().cpu().numpy() # (H, W)
mask = data["mask"].squeeze().cpu().numpy() # (H, W)
normals = data["normal"].cpu().numpy()[0] # (H, W, 3)
# Normalize for visualization
depth_vis = (depth - depth.min()) / (np.ptp(depth) + 1e-8)
depth_vis = (depth_vis * 255).astype("uint8")
mask_vis = mask_to_color_map(mask, user_ids=np.unique(mask))
normals_vis = ((normals + 1) / 2 * 255).astype("uint8")
# Prepare titles and images for display
titles = ["Color", "Depth", "Mask", "Normals"]
images = [
cv2.cvtColor(rgba, cv2.COLOR_RGB2BGR),
cv2.cvtColor(depth_vis, cv2.COLOR_GRAY2BGR),
mask_vis,
cv2.cvtColor(normals_vis, cv2.COLOR_RGB2BGR),
]
if not headless:
# Concatenate images for 2x2 grid display using OpenCV
top = np.hstack([images[0], images[1]])
bottom = np.hstack([images[2], images[3]])
grid = np.vstack([top, bottom])
cv2.imshow("Sensor Views (Color / Depth / Mask / Normals)", grid)
cv2.waitKey(1)
else:
# Save the 2x2 grid as an image using matplotlib
fig, axs = plt.subplots(2, 2, figsize=(10, 8))
for ax, img, title in zip(axs.flatten(), images, titles):
ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax.set_title(title)
ax.axis("off")
plt.tight_layout()
plt.savefig(f"sensor_views_{step_count}.png")
plt.close(fig)
The camera is updated to capture the latest data.
Four types of images are visualized: color, depth, mask, and normals.
Images are displayed in a window or saved as PNG files depending on the mode.
3. Simulation Loop#
The simulation loop moves the robot through different arm poses and periodically updates and visualizes the sensor output.
def run_simulation(sim: SimulationManager, robot: Robot, camera: Camera):
"""Run the simulation loop with robot and camera sensor control."""
print("Starting simulation...")
print("Robot will move through different poses")
print("Press Ctrl+C to stop")
step_count = 0
arm_joint_ids = robot.get_joint_ids("arm")
# Define some target joint positions for demonstration
arm_position1 = (
torch.tensor(
[0.0, 0.5, -1.5, 0.3, -0.5, 0], dtype=torch.float32, device=sim.device
)
.unsqueeze_(0)
.repeat(sim.num_envs, 1)
)
arm_position2 = (
torch.tensor(
[0.0, 0.5, -1.5, -0.3, -0.5, 0], dtype=torch.float32, device=sim.device
)
.unsqueeze_(0)
.repeat(sim.num_envs, 1)
)
try:
while True:
# Update physics
sim.update(step=1)
if step_count % 1001 == 0:
robot.set_qpos(qpos=arm_position1, joint_ids=arm_joint_ids)
print(f"Moving to arm position 1")
# Refresh and get image from sensor
get_sensor_image(camera)
if step_count % 2003 == 0:
robot.set_qpos(qpos=arm_position2, joint_ids=arm_joint_ids)
print(f"Moving to arm position 2")
# Refresh and get image from sensor
get_sensor_image(camera)
step_count += 1
except KeyboardInterrupt:
print("Stopping simulation...")
finally:
print("Cleaning up...")
sim.destroy()
The robot alternates between two arm positions.
After each movement, the sensor image is refreshed and visualized.
Running the Example#
To run the sensor simulation script:
cd <path-to-your-embodichain-checkout>
python scripts/tutorials/sim/create_sensor.py
You can customize the simulation with the following command-line options:
# Use GPU physics
python scripts/tutorials/sim/create_sensor.py --device cuda
# Simulate multiple environments
python scripts/tutorials/sim/create_sensor.py --num_envs 4
# Run in headless mode (no GUI, images saved to disk)
python scripts/tutorials/sim/create_sensor.py --headless
# Enable ray tracing rendering
python scripts/tutorials/sim/create_sensor.py --enable_rt
# Attach the camera to the robot end-effector
python scripts/tutorials/sim/create_sensor.py --attach_sensor
Key Features Demonstrated#
This tutorial demonstrates:
Camera sensor creation using `CameraCfg`
Sensor attachment to a robot link or placement in the scene
Camera configuration (intrinsics, extrinsics, clipping planes)
Real-time visualization of color, depth, mask, and normal images
Robot-sensor integration in a simulation loop
Next Steps#
After completing this tutorial, you can explore:
Using other sensor types (e.g., stereo cameras, force sensors)
Recording sensor data for offline analysis
Integrating sensor feedback into robot control or learning algorithms
This tutorial provides a foundation for integrating perception into robotic simulation scenarios with SimulationManager.