Creating a Modular Environment#
This tutorial demonstrates how to create sophisticated robotic environments using EmbodiChain’s modular architecture. You’ll learn how to use the advanced envs.EmbodiedEnv class with configuration-driven setup, event managers, observation managers, and randomization systems.
The Code#
The tutorial corresponds to the modular_env.py script in the scripts/tutorials/gym directory.
Code for modular_env.py
# ----------------------------------------------------------------------------
# Copyright (c) 2021-2025 DexForce Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------

import torch

from typing import List, Dict, Any

import embodichain.lab.gym.envs.managers.randomization as rand
import embodichain.lab.gym.envs.managers.events as events
import embodichain.lab.gym.envs.managers.observations as obs

from embodichain.lab.gym.envs.managers import (
    EventCfg,
    SceneEntityCfg,
    ObservationCfg,
)
from embodichain.lab.gym.envs import EmbodiedEnv, EmbodiedEnvCfg
from embodichain.lab.gym.utils.registration import register_env
from embodichain.lab.sim.robots import DexforceW1Cfg
from embodichain.lab.sim.sensors import StereoCameraCfg, SensorCfg
from embodichain.lab.sim.shapes import MeshCfg
from embodichain.lab.sim.cfg import (
    LightCfg,
    ArticulationCfg,
    RobotCfg,
    RigidObjectCfg,
    RigidBodyAttributesCfg,
)
from embodichain.data import get_data_path
from embodichain.utils import configclass


@configclass
class ExampleEventCfg:

    replace_obj: EventCfg = EventCfg(
        func=events.replace_assets_from_group,
        mode="reset",
        params={
            "entity_cfg": SceneEntityCfg(
                uid="fork",
            ),
            "folder_path": get_data_path("TableWare/tableware/fork/"),
        },
    )

    randomize_light: EventCfg = EventCfg(
        func=rand.randomize_light,
        mode="interval",
        interval_step=5,
        params={
            "entity_cfg": SceneEntityCfg(
                uid="point",
            ),
            "position_range": [[-0.5, -0.5, 2], [0.5, 0.5, 2]],
            "color_range": [[0.6, 0.6, 0.6], [1, 1, 1]],
            "intensity_range": [50.0, 100.0],
        },
    )

    randomize_table_mat: EventCfg = EventCfg(
        func=rand.randomize_visual_material,
        mode="interval",
        interval_step=10,
        params={
            "entity_cfg": SceneEntityCfg(
                uid="table",
            ),
            "random_texture_prob": 0.5,
            "texture_path": get_data_path("CocoBackground/coco"),
            "base_color_range": [[0.2, 0.2, 0.2], [1.0, 1.0, 1.0]],
        },
    )


@configclass
class ObsCfg:

    obj_pose: ObservationCfg = ObservationCfg(
        func=obs.get_rigid_object_pose,
        mode="add",
        name="fork_pose",
        params={"entity_cfg": SceneEntityCfg(uid="fork")},
    )


@configclass
class ExampleCfg(EmbodiedEnvCfg):

    # Define the robot configuration using DexforceW1Cfg
    robot: RobotCfg = DexforceW1Cfg.from_dict(
        {
            "uid": "dexforce_w1",
            "version": "v021",
            "arm_kind": "anthropomorphic",
            "init_pos": [0.0, 0, 0.0],
        }
    )

    # Define the sensor configuration using StereoCameraCfg
    sensor: List[SensorCfg] = [
        StereoCameraCfg(
            uid="eye_in_head",
            width=960,
            height=540,
            enable_mask=True,
            enable_depth=True,
            left_to_right_pos=(0.06, 0, 0),
            intrinsics=(450, 450, 480, 270),
            intrinsics_right=(450, 450, 480, 270),
            extrinsics=StereoCameraCfg.ExtrinsicsCfg(
                parent="eyes",
            ),
        )
    ]

    light: EmbodiedEnvCfg.EnvLightCfg = EmbodiedEnvCfg.EnvLightCfg(
        direct=[
            LightCfg(
                uid="point",
                light_type="point",
                color=(1.0, 1.0, 1.0),
                intensity=50.0,
                init_pos=(0, 0, 2),
            )
        ]
    )

    background: List[RigidObjectCfg] = [
        RigidObjectCfg(
            uid="table",
            shape=MeshCfg(
                fpath=get_data_path("CircleTableSimple/circle_table_simple.ply"),
                compute_uv=True,
            ),
            attrs=RigidBodyAttributesCfg(
                mass=10.0,
                static_friction=0.95,
                dynamic_friction=0.85,
                restitution=0.01,
            ),
            body_type="kinematic",
            init_pos=(0.80, 0, 0.8),
            init_rot=(0, 90, 0),
        ),
    ]

    rigid_object: List[RigidObjectCfg] = [
        RigidObjectCfg(
            uid="fork",
            shape=MeshCfg(
                fpath=get_data_path("TableWare/tableware/fork/standard_fork_scale.ply"),
            ),
            body_scale=(0.75, 0.75, 1.0),
            init_pos=(0.8, 0, 1.0),
        ),
    ]

    articulation_cfg: List[ArticulationCfg] = [
        ArticulationCfg(
            uid="drawer",
            fpath="SlidingBoxDrawer/SlidingBoxDrawer.urdf",
            init_pos=(0.5, 0.0, 0.85),
        )
    ]

    events = ExampleEventCfg()

    observations = ObsCfg()


@register_env("ModularEnv-v1", max_episode_steps=100, override=True)
class ModularEnv(EmbodiedEnv):
    """
    An example of a modular environment that inherits from EmbodiedEnv
    and uses custom event and observation managers.
    """

    def __init__(self, cfg: EmbodiedEnvCfg, **kwargs):
        super().__init__(cfg, **kwargs)


if __name__ == "__main__":
    import gymnasium as gym
    import argparse

    from embodichain.lab.sim import SimulationManagerCfg

    parser = argparse.ArgumentParser()
    parser.add_argument("--enable_rt", action="store_true", help="Enable ray tracing")
    args = parser.parse_args()

    env_cfg = ExampleCfg(sim_cfg=SimulationManagerCfg(enable_rt=args.enable_rt))

    # Create the Gym environment
    env = gym.make("ModularEnv-v1", cfg=env_cfg)

    while True:
        obs, info = env.reset()

        for i in range(100):
            action = torch.zeros(env.action_space.shape, dtype=torch.float32)
            obs, reward, done, truncated, info = env.step(action)
The Code Explained#
This tutorial showcases EmbodiChain’s most powerful environment creation approach using the envs.EmbodiedEnv class. Unlike the basic environment tutorial, this approach uses declarative configuration classes and manager systems for maximum flexibility and reusability.
Event Configuration#
Events define automated behaviors that occur during simulation. Three modes are supported:
startup: triggers once when the environment is initialized (not used in this tutorial; see the sketch below)
reset: triggers every time the environment is reset
interval: triggers at fixed step intervals during simulation
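Since none of the tutorial's events use the startup mode, here is a minimal sketch of what one could look like. The set_table_pose function is a hypothetical user-defined event function (not a confirmed EmbodiChain API); only the EventCfg and SceneEntityCfg fields mirror the configuration shown in this tutorial.

# Hypothetical startup event: runs once when the environment is initialized.
# `set_table_pose` is a placeholder for a user-defined event function.
init_table: EventCfg = EventCfg(
    func=set_table_pose,
    mode="startup",
    params={"entity_cfg": SceneEntityCfg(uid="table")},
)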
The ExampleEventCfg demonstrates three types of events:
@configclass
class ExampleEventCfg:

    replace_obj: EventCfg = EventCfg(
        func=events.replace_assets_from_group,
        mode="reset",
        params={
            "entity_cfg": SceneEntityCfg(
                uid="fork",
            ),
            "folder_path": get_data_path("TableWare/tableware/fork/"),
        },
    )

    randomize_light: EventCfg = EventCfg(
        func=rand.randomize_light,
        mode="interval",
        interval_step=5,
        params={
            "entity_cfg": SceneEntityCfg(
                uid="point",
            ),
            "position_range": [[-0.5, -0.5, 2], [0.5, 0.5, 2]],
            "color_range": [[0.6, 0.6, 0.6], [1, 1, 1]],
            "intensity_range": [50.0, 100.0],
        },
    )

    randomize_table_mat: EventCfg = EventCfg(
        func=rand.randomize_visual_material,
        mode="interval",
        interval_step=10,
        params={
            "entity_cfg": SceneEntityCfg(
                uid="table",
            ),
            "random_texture_prob": 0.5,
            "texture_path": get_data_path("CocoBackground/coco"),
            "base_color_range": [[0.2, 0.2, 0.2], [1.0, 1.0, 1.0]],
        },
    )
Asset Replacement Event
The replace_obj event demonstrates dynamic asset swapping:
Function: envs.managers.events.replace_assets_from_group()
Mode: "reset" - triggers at every environment reset
Purpose: Randomly selects a different fork model from the specified folder
Light Randomization Event
The randomize_light event creates dynamic lighting conditions:
Function: envs.managers.randomization.rendering.randomize_light()
Mode: "interval" - triggers every 5 steps
Parameters: Randomizes position, color, and intensity within the specified ranges
Material Randomization Event
The randomize_table_mat event varies visual appearance:
Function: envs.managers.randomization.rendering.randomize_visual_material()
Mode: "interval" - triggers every 10 steps
Features: Random textures from the COCO dataset and base color variations
For more randomization events, please refer to the envs.managers.randomization module.
Observation Configuration#
The default observation from envs.EmbodiedEnv includes:
- robot: robot proprioceptive data (joint positions, velocities, efforts)
- sensor: all available sensor data (images, depth, segmentation, etc.)
However, users typically need to define custom observations for their specific learning tasks. To handle this, the observation manager system allows additional observations to be specified declaratively.
"entity_cfg": SceneEntityCfg(
uid="table",
),
"random_texture_prob": 0.5,
"texture_path": get_data_path("CocoBackground/coco"),
"base_color_range": [[0.2, 0.2, 0.2], [1.0, 1.0, 1.0]],
},
)
This configuration:
Function: envs.managers.observations.get_rigid_object_pose()
Mode: "add" - appends the data to the observation dictionary
Name: Custom key ("fork_pose") for the observation data
Target: Tracks the fork object’s pose in the scene
For detailed documentation, see envs.managers.cfg.ObservationCfg.
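As a usage sketch (not part of the tutorial script), the custom observation can then be read from the dictionary returned by reset() or step(); whether "add"-mode entries appear directly under the configured name is an assumption to verify.

obs, info = env.reset()
# Assumed: "add"-mode observations are stored under the name given in ObservationCfg.
fork_pose = obs.get("fork_pose")
print("fork pose:", fork_pose)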
Environment Configuration#
The main environment configuration inherits from envs.EmbodiedEnvCfg and defines all scene components:
Robot Configuration
robot: RobotCfg = DexforceW1Cfg.from_dict(
    {
        "uid": "dexforce_w1",
        "version": "v021",
        "arm_kind": "anthropomorphic",
        "init_pos": [0.0, 0, 0.0],
    }
)
Uses the pre-configured DexforceW1Cfg with customizations:
Version: Specific robot variant (v021)
Arm Type: Anthropomorphic configuration
Position: Initial placement in the scene
Sensor Configuration
sensor: List[SensorCfg] = [
    StereoCameraCfg(
        uid="eye_in_head",
        width=960,
        height=540,
        enable_mask=True,
        enable_depth=True,
        left_to_right_pos=(0.06, 0, 0),
        intrinsics=(450, 450, 480, 270),
        intrinsics_right=(450, 450, 480, 270),
        extrinsics=StereoCameraCfg.ExtrinsicsCfg(
            parent="eyes",
        ),
    )
]
Configures a stereo camera system using StereoCameraCfg:
Resolution: 960x540 pixels for realistic visual input
Features: Depth sensing and segmentation masks enabled
Stereo Setup: 6cm baseline between left and right cameras
Mounting: Attached to robot’s “eyes” frame
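To see what the stereo camera actually contributes to the observation, a simple (hedged) approach is to inspect the returned dictionary; the exact nesting of the camera outputs is not specified in this tutorial.

obs, info = env.reset()
# The tutorial states the default observation includes robot and sensor groups;
# printing the top-level entries reveals how "eye_in_head" exposes RGB/depth/mask data.
for key, value in obs.items():
    print(key, type(value))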
Lighting Configuration
light: EmbodiedEnvCfg.EnvLightCfg = EmbodiedEnvCfg.EnvLightCfg(
    direct=[
        LightCfg(
            uid="point",
            light_type="point",
            color=(1.0, 1.0, 1.0),
            intensity=50.0,
            init_pos=(0, 0, 2),
        )
    ]
)
Defines scene illumination with controllable point lights:
Type: Point light for realistic shadows
Properties: Configurable color, intensity, and position
UID: Named reference for event system manipulation
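Additional lights can be declared in the same direct list. The sketch below reuses only the LightCfg fields shown in this tutorial; the second "fill" light and its values are illustrative, not part of the script.

light: EmbodiedEnvCfg.EnvLightCfg = EmbodiedEnvCfg.EnvLightCfg(
    direct=[
        # Point light from the tutorial configuration.
        LightCfg(uid="point", light_type="point", color=(1.0, 1.0, 1.0), intensity=50.0, init_pos=(0, 0, 2)),
        # Hypothetical second point light acting as a dim fill light.
        LightCfg(uid="fill", light_type="point", color=(0.8, 0.8, 1.0), intensity=20.0, init_pos=(1.0, 1.0, 2.0)),
    ]
)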
Rigid Objects
background: List[RigidObjectCfg] = [
    RigidObjectCfg(
        uid="table",
        shape=MeshCfg(
            fpath=get_data_path("CircleTableSimple/circle_table_simple.ply"),
            compute_uv=True,
        ),
        attrs=RigidBodyAttributesCfg(
            mass=10.0,
            static_friction=0.95,
            dynamic_friction=0.85,
            restitution=0.01,
        ),
        body_type="kinematic",
        init_pos=(0.80, 0, 0.8),
        init_rot=(0, 90, 0),
    ),
]

rigid_object: List[RigidObjectCfg] = [
    RigidObjectCfg(
        uid="fork",
        shape=MeshCfg(
            fpath=get_data_path("TableWare/tableware/fork/standard_fork_scale.ply"),
        ),
        body_scale=(0.75, 0.75, 1.0),
        init_pos=(0.8, 0, 1.0),
    ),
]
Multiple objects demonstrate different physics properties:
Table Configuration:
Shape: Custom PLY mesh with UV mapping
Physics: Kinematic body (movable but not affected by forces)
Material: Friction and restitution properties for realistic contact
Fork Configuration:
Shape: Detailed mesh from asset library
Scale: Proportionally scaled for scene consistency
Physics: Dynamic body affected by gravity and collisions
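Extending the scene with more dynamic objects follows the same pattern. The entry below could be appended to the rigid_object list; the spoon asset path is hypothetical, and only the RigidObjectCfg / MeshCfg fields already used in this tutorial are assumed.

# Hypothetical additional entry for the rigid_object list; the asset path is illustrative only.
RigidObjectCfg(
    uid="spoon",
    shape=MeshCfg(fpath=get_data_path("TableWare/tableware/spoon/standard_spoon.ply")),
    init_pos=(0.8, 0.1, 1.0),
),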
Articulated Objects
articulation_cfg: List[ArticulationCfg] = [
    ArticulationCfg(
        uid="drawer",
        fpath="SlidingBoxDrawer/SlidingBoxDrawer.urdf",
        init_pos=(0.5, 0.0, 0.85),
    )
]
Demonstrates complex mechanisms with moving parts:
URDF: Sliding drawer with joints and constraints
Positioning: Placed on table surface for interaction
Environment Implementation#
The actual environment class is remarkably simple due to the configuration-driven approach:
@register_env("ModularEnv-v1", max_episode_steps=100, override=True)
class ModularEnv(EmbodiedEnv):
    """
    An example of a modular environment that inherits from EmbodiedEnv
    and uses custom event and observation managers.
    """

    def __init__(self, cfg: EmbodiedEnvCfg, **kwargs):
        super().__init__(cfg, **kwargs)
The envs.EmbodiedEnv base class automatically:
Loads all configured scene components
Sets up observation and action spaces
Initializes event and observation managers
Handles environment lifecycle (reset, step, etc.)
The Code Execution#
To run the modular environment:
cd /path/to/embodichain
python scripts/tutorials/gym/modular_env.py
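The script also exposes an --enable_rt flag (see the argparse setup in the listing above) to turn on ray tracing:
python scripts/tutorials/gym/modular_env.py --enable_rt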
The script demonstrates the complete workflow:
Configuration: Creates an instance of ExampleCfg
Registration: Uses the registered environment ID "ModularEnv-v1"
Execution: Runs episodes with zero actions to observe the automatic behaviors
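To make the robot do something other than hold still, the zero action in the main loop can be swapped for a random sample from the Gymnasium action space. This is a minimal sketch rather than part of the tutorial script; converting the sample to a torch tensor assumes the environment expects torch actions, as in the original loop.

while True:
    obs, info = env.reset()
    for i in range(100):
        # Sample a random action and convert it to a torch tensor, matching the original loop's dtype.
        action = torch.as_tensor(env.action_space.sample(), dtype=torch.float32)
        obs, reward, done, truncated, info = env.step(action)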
Manager System Benefits#
The manager-based architecture provides several key advantages:
Event Managers
Modularity: Reusable event functions across environments
Timing Control: Flexible scheduling (reset, interval, condition-based)
Parameter Binding: Type-safe configuration with validation
Extensibility: Easy to add custom event behaviors
Observation Managers
Flexible Data: Any simulation data can become an observation
Processing Pipeline: Built-in normalization and transformation
Dynamic Composition: Runtime observation space modification
Performance: Efficient data collection and GPU acceleration
Key Features Demonstrated#
This tutorial showcases the most advanced features of EmbodiChain environments:
Configuration-Driven Design: Declarative environment specification
Manager Systems: Modular event and observation handling
Asset Management: Dynamic loading and randomization
Sensor Integration: Realistic camera systems with stereo vision
Physics Simulation: Complex articulated and rigid body dynamics
Visual Randomization: Automated domain randomization
Extensible Architecture: Easy customization and extension points
This tutorial demonstrates the full power of EmbodiChain’s modular environment system, providing the foundation for creating sophisticated robotic learning scenarios.